linux/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>

#include "xgbe.h"
#include "xgbe-common.h"

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
                                      unsigned int usec)
{
        unsigned long rate;
        unsigned int ret;

        DBGPR("-->xgbe_usec_to_riwt\n");

        rate = pdata->sysclk_rate;

        /*
         * Convert the input usec value to the watchdog timer value. Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
         */
        ret = (usec * (rate / 1000000)) / 256;

        DBGPR("<--xgbe_usec_to_riwt\n");

        return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
                                      unsigned int riwt)
{
        unsigned long rate;
        unsigned int ret;

        DBGPR("-->xgbe_riwt_to_usec\n");

        rate = pdata->sysclk_rate;

        /*
         * Convert the input watchdog timer value to the usec value. Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
         */
        ret = (riwt * 256) / (rate / 1000000);

        DBGPR("<--xgbe_riwt_to_usec\n");

        return ret;
}
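
/* Worked example of the two conversions above (assumed clock rate, for
 * illustration only): with a 125 MHz system clock, rate / 10^6 = 125, so
 *   xgbe_usec_to_riwt(pdata, 100) = (100 * 125) / 256 = 48
 *   xgbe_riwt_to_usec(pdata, 48)  = (48 * 256) / 125  = 98
 * The integer division makes the round trip slightly lossy (100 -> 48 -> 98).
 */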

static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++)
                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
                                       pdata->pblx8);

        return 0;
}

static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
{
        return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
}

static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
                                       pdata->tx_pbl);
        }

        return 0;
}

static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
{
        return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
}

static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
                                       pdata->rx_pbl);
        }

        return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
                                       pdata->tx_osp_mode);
        }

        return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

        return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

        return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
                                    unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

        return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
                                    unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

        return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
                                       pdata->rx_riwt);
        }

        return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
        return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
                                       pdata->rx_buf_size);
        }
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
        }
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
        }

        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}

static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
                              unsigned int index, unsigned int val)
{
        unsigned int wait;
        int ret = 0;

        mutex_lock(&pdata->rss_mutex);

        if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
                ret = -EBUSY;
                goto unlock;
        }

        XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

        wait = 1000;
        while (wait--) {
                if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
                        goto unlock;

                usleep_range(1000, 1500);
        }

        ret = -EBUSY;

unlock:
        mutex_unlock(&pdata->rss_mutex);

        return ret;
}

static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
        unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
        unsigned int *key = (unsigned int *)&pdata->rss_key;
        int ret;

        while (key_regs--) {
                ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
                                         key_regs, *key++);
                if (ret)
                        return ret;
        }

        return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
        unsigned int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
                ret = xgbe_write_rss_reg(pdata,
                                         XGBE_RSS_LOOKUP_TABLE_TYPE, i,
                                         pdata->rss_table[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
        memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

        return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
                                     const u32 *table)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
                XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

        return xgbe_write_rss_lookup_table(pdata);
}
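
/* Illustrative sketch (not part of the original source): for a device with
 * four Rx DMA channels, a default ethtool-style indirection table would
 * simply spread entries round-robin, populating only the DMCH field of each
 * MAC_RSSDR-formatted word:
 *
 *     for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
 *             XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, i % 4);
 *
 * xgbe_write_rss_lookup_table() then pushes each word out through the
 * indirect MAC_RSSAR/MAC_RSSDR access sequence implemented above.
 */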

static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
        int ret;

        if (!pdata->hw_feat.rss)
                return -EOPNOTSUPP;

        /* Program the hash key */
        ret = xgbe_write_rss_hash_key(pdata);
        if (ret)
                return ret;

        /* Program the lookup table */
        ret = xgbe_write_rss_lookup_table(pdata);
        if (ret)
                return ret;

        /* Set the RSS options */
        XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

        /* Enable RSS */
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

        return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
        if (!pdata->hw_feat.rss)
                return -EOPNOTSUPP;

        XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

        return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
        int ret;

        if (!pdata->hw_feat.rss)
                return;

        if (pdata->netdev->features & NETIF_F_RXHASH)
                ret = xgbe_enable_rss(pdata);
        else
                ret = xgbe_disable_rss(pdata);

        if (ret)
                netdev_err(pdata->netdev,
                           "error configuring RSS, RSS disabled\n");
}

static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
        unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
        unsigned int i;

        /* Clear MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

        /* Clear MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
        q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;
        struct ieee_ets *ets = pdata->ets;
        unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
        unsigned int i;

        /* Set MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++) {
                unsigned int ehfc = 0;

                if (pfc && ets) {
                        unsigned int prio;

                        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
                                unsigned int tc;

                                /* Does this queue handle the priority? */
                                if (pdata->prio2q_map[prio] != i)
                                        continue;

                                /* Get the Traffic Class for this priority */
                                tc = ets->prio_tc[prio];

                                /* Check if flow control should be enabled */
                                if (pfc->pfc_en & (1 << tc)) {
                                        ehfc = 1;
                                        break;
                                }
                        }
                } else {
                        ehfc = 1;
                }

                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

                netif_dbg(pdata, drv, pdata->netdev,
                          "flow control %s for RXq%u\n",
                          ehfc ? "enabled" : "disabled", i);
        }

        /* Set MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
        q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);

                /* Enable transmit flow control */
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
                /* Set pause time */
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

        return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

        return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        if (pdata->tx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_tx_flow_control(pdata);
        else
                xgbe_disable_tx_flow_control(pdata);

        return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        if (pdata->rx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_rx_flow_control(pdata);
        else
                xgbe_disable_rx_flow_control(pdata);

        return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        xgbe_config_tx_flow_control(pdata);
        xgbe_config_rx_flow_control(pdata);

        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
                           (pfc && pfc->pfc_en) ? 1 : 0);
}

static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int dma_ch_isr, dma_ch_ier;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                /* Clear all the interrupts which are set */
                dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);

                /* Clear all interrupt enable bits */
                dma_ch_ier = 0;

                /* Enable following interrupts
                 *   NIE  - Normal Interrupt Summary Enable
                 *   AIE  - Abnormal Interrupt Summary Enable
                 *   FBEE - Fatal Bus Error Enable
                 */
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);

                if (channel->tx_ring) {
                        /* Enable the following Tx interrupts
                         *   TIE  - Transmit Interrupt Enable (unless using
                         *          per channel interrupts)
                         */
                        if (!pdata->per_channel_irq)
                                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
                }
                if (channel->rx_ring) {
                        /* Enable following Rx interrupts
                         *   RBUE - Receive Buffer Unavailable Enable
                         *   RIE  - Receive Interrupt Enable (unless using
                         *          per channel interrupts)
                         */
                        XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
                        if (!pdata->per_channel_irq)
                                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                }

                XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
        }
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int mtl_q_isr;
        unsigned int q_count, i;

        q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
        for (i = 0; i < q_count; i++) {
                /* Clear all the interrupts which are set */
                mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

                /* No MTL interrupts to be enabled */
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
        }
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int mac_ier = 0;

        /* Enable Timestamp interrupt */
        XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

        XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

        /* Enable all counter interrupts */
        XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
        XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
}

static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
{
        if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
                return 0;

        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);

        return 0;
}

static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
{
        if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
                return 0;

        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);

        return 0;
}

static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
{
        if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
                return 0;

        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);

        return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        /* Put the VLAN tag in the Rx descriptor */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

        /* Don't check the VLAN type */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

        /* Check only C-TAG (0x8100) packets */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

        /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

        /* Enable VLAN tag stripping */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

        return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

        return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Enable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

        /* Enable VLAN Hash Table filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

        /* Disable VLAN tag inverse matching */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

        /* Only filter on the lower 12-bits of the VLAN tag */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

        /* In order for the VLAN Hash Table filtering to be effective,
         * the VLAN tag identifier in the VLAN Tag Register must not
         * be zero.  Set the VLAN tag identifier to "1" to enable the
         * VLAN Hash Table filtering.  This implies that a VLAN tag of
         * 1 will always pass filtering.
         */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

        return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Disable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

        return 0;
}

static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
        u32 poly = 0xedb88320;  /* CRCPOLY_LE */
        u32 crc = ~0;
        u32 temp = 0;
        unsigned char *data = (unsigned char *)&vid_le;
        unsigned char data_byte = 0;
        int i, bits;

        bits = get_bitmask_order(VLAN_VID_MASK);
        for (i = 0; i < bits; i++) {
                if ((i % 8) == 0)
                        data_byte = data[i / 8];

                temp = ((crc & 1) ^ data_byte) & 1;
                crc >>= 1;
                data_byte >>= 1;

                if (temp)
                        crc ^= poly;
        }

        return crc;
}
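
/* Note on the loop above: get_bitmask_order(VLAN_VID_MASK) is 12, so only
 * the 12 VID bits of the little-endian tag are fed through the bit-serial
 * CRC-32; this is intentionally narrower than a full crc32_le() over both
 * bytes, presumably matching what the hardware hashes.
 */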

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
        u32 crc;
        u16 vid;
        __le16 vid_le;
        u16 vlan_hash_table = 0;

        /* Generate the VLAN Hash Table value */
        for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
                /* Get the CRC32 value of the VLAN ID */
                vid_le = cpu_to_le16(vid);
                crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

                vlan_hash_table |= (1 << crc);
        }

        /* Set the VLAN Hash Table filtering register */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

        return 0;
}
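
/* Example of the table update (hash value assumed for illustration): the
 * bit-reversed CRC shifted right by 28 leaves a 4-bit index in the range
 * 0-15, so each active VLAN sets exactly one bit of the 16-bit VLHT field.
 * A VID whose hash works out to 0x9 contributes (1 << 0x9) = 0x0200 to
 * vlan_hash_table.
 */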

static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
                                     unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
                return 0;

        netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
                  enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

        /* Hardware will still perform VLAN filtering in promiscuous mode */
        if (enable) {
                xgbe_disable_rx_vlan_filtering(pdata);
        } else {
                if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        xgbe_enable_rx_vlan_filtering(pdata);
        }

        return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
                                       unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
                return 0;

        netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
                  enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

        return 0;
}

static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
                             struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
        unsigned int mac_addr_hi, mac_addr_lo;
        u8 *mac_addr;

        mac_addr_lo = 0;
        mac_addr_hi = 0;

        if (ha) {
                mac_addr = (u8 *)&mac_addr_lo;
                mac_addr[0] = ha->addr[0];
                mac_addr[1] = ha->addr[1];
                mac_addr[2] = ha->addr[2];
                mac_addr[3] = ha->addr[3];
                mac_addr = (u8 *)&mac_addr_hi;
                mac_addr[0] = ha->addr[4];
                mac_addr[1] = ha->addr[5];

                netif_dbg(pdata, drv, pdata->netdev,
                          "adding mac address %pM at %#x\n",
                          ha->addr, *mac_reg);

                XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
        }

        XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
        *mac_reg += MAC_MACA_INC;
        XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
        *mac_reg += MAC_MACA_INC;
}

static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct netdev_hw_addr *ha;
        unsigned int mac_reg;
        unsigned int addn_macs;

        mac_reg = MAC_MACA1HR;
        addn_macs = pdata->hw_feat.addn_mac;

        if (netdev_uc_count(netdev) > addn_macs) {
                xgbe_set_promiscuous_mode(pdata, 1);
        } else {
                netdev_for_each_uc_addr(ha, netdev) {
                        xgbe_set_mac_reg(pdata, ha, &mac_reg);
                        addn_macs--;
                }

                if (netdev_mc_count(netdev) > addn_macs) {
                        xgbe_set_all_multicast_mode(pdata, 1);
                } else {
                        netdev_for_each_mc_addr(ha, netdev) {
                                xgbe_set_mac_reg(pdata, ha, &mac_reg);
                                addn_macs--;
                        }
                }
        }

        /* Clear remaining additional MAC address entries */
        while (addn_macs--)
                xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct netdev_hw_addr *ha;
        unsigned int hash_reg;
        unsigned int hash_table_shift, hash_table_count;
        u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
        u32 crc;
        unsigned int i;

        hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
        hash_table_count = pdata->hw_feat.hash_table_size / 32;
        memset(hash_table, 0, sizeof(hash_table));

        /* Build the MAC Hash Table register values */
        netdev_for_each_uc_addr(ha, netdev) {
                crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
                crc >>= hash_table_shift;
                hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }

        netdev_for_each_mc_addr(ha, netdev) {
                crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
                crc >>= hash_table_shift;
                hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }

        /* Set the MAC Hash Table registers */
        hash_reg = MAC_HTR0;
        for (i = 0; i < hash_table_count; i++) {
                XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
                hash_reg += MAC_HTR_INC;
        }
}
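
/* Worked example (assuming a 256-entry hash table): hash_table_shift is
 * 26 - (256 >> 7) = 24, so crc >>= 24 leaves an 8-bit index.  With
 * hash_table_count = 256 / 32 = 8 registers, crc >> 5 then selects one of
 * MAC_HTR0..MAC_HTR7 and crc & 0x1f selects the bit within that register.
 */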

static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
        if (pdata->hw_feat.hash_table_size)
                xgbe_set_mac_hash_table(pdata);
        else
                xgbe_set_mac_addn_addrs(pdata);

        return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
{
        unsigned int mac_addr_hi, mac_addr_lo;

        mac_addr_hi = (addr[5] <<  8) | (addr[4] <<  0);
        mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
                      (addr[1] <<  8) | (addr[0] <<  0);

        XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
        XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

        return 0;
}
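
/* Byte-order example (illustrative address, not from the source): for
 * addr = 02:aa:bb:cc:dd:ee the registers end up as
 *   MAC_MACA0HR = 0x0000eedd   (addr[5] << 8 | addr[4])
 *   MAC_MACA0LR = 0xccbbaa02   (addr[3..0], little-endian packing)
 */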

static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        unsigned int pr_mode, am_mode;

        pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
        am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

        xgbe_set_promiscuous_mode(pdata, pr_mode);
        xgbe_set_all_multicast_mode(pdata, am_mode);

        xgbe_add_mac_addresses(pdata);

        return 0;
}

static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                              int mmd_reg)
{
        unsigned long flags;
        unsigned int mmd_address;
        int mmd_data;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 32-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 2 bits and reading 32 bits of data.
         */
        spin_lock_irqsave(&pdata->xpcs_lock, flags);
        XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
        mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

        return mmd_data;
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                                int mmd_reg, int mmd_data)
{
        unsigned int mmd_address;
        unsigned long flags;

        if (mmd_reg & MII_ADDR_C45)
                mmd_address = mmd_reg & ~MII_ADDR_C45;
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 32-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 2 bits and writing 32 bits of data.
         */
        spin_lock_irqsave(&pdata->xpcs_lock, flags);
        XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
        XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
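
/* Worked example of the two-phase access (clause-45 register chosen for
 * illustration): for MMD 1, register 0x0001, mmd_address = (1 << 16) | 1 =
 * 0x10001.  The address phase writes mmd_address >> 8 = 0x100 to the select
 * window (mmio offset PCS_MMD_SELECT << 2); the data phase then touches
 * mmio offset (0x10001 & 0xff) << 2 = 0x04.  Both phases must stay under
 * xpcs_lock so another CPU cannot retarget the window in between.
 */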

static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
        return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

        return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

        return 0;
}

static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
        struct xgbe_ring_desc *rdesc = rdata->rdesc;

        /* Reset the Tx descriptor
         *   Set buffer 1 (lo) address to zero
         *   Set buffer 1 (hi) address to zero
         *   Reset all other control bits (IC, TTSE, B2L & B1L)
         *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
         */
        rdesc->desc0 = 0;
        rdesc->desc1 = 0;
        rdesc->desc2 = 0;
        rdesc->desc3 = 0;

        /* Make sure ownership is written to the descriptor */
        dma_wmb();
}

static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        int i;
        int start_index = ring->cur;

        DBGPR("-->tx_desc_init\n");

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Initialize Tx descriptor */
                xgbe_tx_desc_reset(rdata);
        }

        /* Update the total number of Tx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        DBGPR("<--tx_desc_init\n");
}

static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
                               struct xgbe_ring_data *rdata, unsigned int index)
{
        struct xgbe_ring_desc *rdesc = rdata->rdesc;
        unsigned int rx_usecs = pdata->rx_usecs;
        unsigned int rx_frames = pdata->rx_frames;
        unsigned int inte;
        dma_addr_t hdr_dma, buf_dma;

        if (!rx_usecs && !rx_frames) {
                /* No coalescing, interrupt for every descriptor */
                inte = 1;
        } else {
                /* Set interrupt based on Rx frame coalescing setting */
                if (rx_frames && !((index + 1) % rx_frames))
                        inte = 1;
                else
                        inte = 0;
        }

        /* Reset the Rx descriptor
         *   Set buffer 1 (lo) address to header dma address (lo)
         *   Set buffer 1 (hi) address to header dma address (hi)
         *   Set buffer 2 (lo) address to buffer dma address (lo)
         *   Set buffer 2 (hi) address to buffer dma address (hi) and
         *     set control bits OWN and INTE
         */
        hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
        buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
        rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
        rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
        rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
        rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

        /* Since the Rx DMA engine is likely running, make sure everything
         * is written to the descriptor(s) before setting the OWN bit
         * for the descriptor
         */
        dma_wmb();

        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

        /* Make sure ownership is written to the descriptor */
        dma_wmb();
}
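
/* Coalescing example (assumed ethtool settings): with rx_frames = 16 and a
 * nonzero rx_usecs, INTE is set only on descriptors where
 * (index + 1) % 16 == 0, i.e. every 16th descriptor raises a receive
 * interrupt; the RIWT watchdog programmed in xgbe_config_rx_coalesce()
 * covers a partially-filled tail that never reaches the 16th descriptor.
 */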

static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        unsigned int start_index = ring->cur;
        unsigned int i;

        DBGPR("-->rx_desc_init\n");

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Initialize Rx descriptor */
                xgbe_rx_desc_reset(pdata, rdata, i);
        }

        /* Update the total number of Rx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        /* Update the Rx Descriptor Tail Pointer */
        rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        DBGPR("<--rx_desc_init\n");
}

static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
                                      unsigned int addend)
{
        /* Set the addend register value and tell the device */
        XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
        XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);

        /* Wait for addend update to complete */
        while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
                udelay(5);
}
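
/* For reference (the addend itself is computed elsewhere in the driver;
 * the formula below is stated as an assumption, not taken from this file):
 * in fine-update mode the addend is typically derived as
 *   addend = (target_tick_rate << 32) / ptp_clock_rate
 * so that the 32-bit accumulator overflows, advancing the counter by
 * SSINC nanoseconds, at the desired rate; frequency adjustments simply
 * rescale this value and call xgbe_update_tstamp_addend() again.
 */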

static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
                                 unsigned int nsec)
{
        /* Set the time values and tell the device */
        XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
        XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
        XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);

        /* Wait for time update to complete */
        while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
                udelay(5);
}

static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
{
        u64 nsec;

        nsec = XGMAC_IOREAD(pdata, MAC_STSR);
        nsec *= NSEC_PER_SEC;
        nsec += XGMAC_IOREAD(pdata, MAC_STNR);

        return nsec;
}

static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
{
        unsigned int tx_snr;
        u64 nsec;

        tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
        if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
                return 0;

        nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
        nsec *= NSEC_PER_SEC;
        nsec += tx_snr;

        return nsec;
}

static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
                               struct xgbe_ring_desc *rdesc)
{
        u64 nsec;

        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
            !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
                nsec = le32_to_cpu(rdesc->desc1);
                nsec <<= 32;
                nsec |= le32_to_cpu(rdesc->desc0);
                if (nsec != 0xffffffffffffffffULL) {
                        packet->rx_tstamp = nsec;
                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                                       RX_TSTAMP, 1);
                }
        }
}

static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
                              unsigned int mac_tscr)
{
        /* Set one nano-second accuracy */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

        /* Set fine timestamp update */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

        /* Overwrite earlier timestamps */
        XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

        XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

        /* Exit if timestamping is not enabled */
        if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
                return 0;

        /* Initialize time registers */
        XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
        XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
        xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
        xgbe_set_tstamp_time(pdata, 0, 0);

        /* Initialize the timecounter */
        timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
                         ktime_to_ns(ktime_get_real()));

        return 0;
}

static void xgbe_config_tc(struct xgbe_prv_data *pdata)
{
        unsigned int offset, queue, prio;
        u8 i;

        netdev_reset_tc(pdata->netdev);
        if (!pdata->num_tcs)
                return;

        netdev_set_num_tc(pdata->netdev, pdata->num_tcs);

        for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
                while ((queue < pdata->tx_q_count) &&
                       (pdata->q2tc_map[queue] == i))
                        queue++;

                netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
                          i, offset, queue - 1);
                netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
                offset = queue;
        }

        if (!pdata->ets)
                return;

        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
                netdev_set_prio_tc_map(pdata->netdev, prio,
                                       pdata->ets->prio_tc[prio]);
}

static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
{
        struct ieee_ets *ets = pdata->ets;
        unsigned int total_weight, min_weight, weight;
        unsigned int mask, reg, reg_val;
        unsigned int i, prio;

        if (!ets)
                return;

        /* Set Tx to deficit weighted round robin scheduling algorithm (when
         * traffic class is using ETS algorithm)
         */
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);

        /* Set Traffic Class algorithms */
        total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
        min_weight = total_weight / 100;
        if (!min_weight)
                min_weight = 1;

        for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
                /* Map the priorities to the traffic class */
                mask = 0;
                for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
                        if (ets->prio_tc[prio] == i)
                                mask |= (1 << prio);
                }
                mask &= 0xff;

                netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
                          i, mask);
                reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
                reg_val = XGMAC_IOREAD(pdata, reg);

                reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
                reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));

                XGMAC_IOWRITE(pdata, reg, reg_val);

                /* Set the traffic class algorithm */
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        netif_dbg(pdata, drv, pdata->netdev,
                                  "TC%u using SP\n", i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_SP);
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        weight = total_weight * ets->tc_tx_bw[i] / 100;
                        weight = clamp(weight, min_weight, total_weight);

                        netif_dbg(pdata, drv, pdata->netdev,
                                  "TC%u using DWRR (weight %u)\n", i, weight);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_ETS);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
                                               weight);
                        break;
                }
        }

        xgbe_config_tc(pdata);
}
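
/* Weight example (assumed configuration): with an MTU of 1500 and
 * tc_cnt = 8, total_weight = 12000 and min_weight = 120.  A traffic class
 * granted 25% bandwidth via ETS gets
 *   weight = 12000 * 25 / 100 = 3000
 * clamped to the [120, 12000] range before being written to MTL_TC_QWR.
 */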

static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
{
        xgbe_config_flow_control(pdata);
}

static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
                               struct xgbe_ring *ring)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring_data *rdata;

        /* Make sure everything is written before the register write */
        wmb();

        /* Issue a poll command to Tx DMA by writing address
         * of next immediate free descriptor
         */
        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        /* Start the Tx timer */
        if (pdata->tx_usecs && !channel->tx_timer_active) {
                channel->tx_timer_active = 1;
                mod_timer(&channel->tx_timer,
                          jiffies + usecs_to_jiffies(pdata->tx_usecs));
        }

        ring->tx.xmit_more = 0;
}

static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
        unsigned int csum, tso, vlan;
        unsigned int tso_context, vlan_context;
        unsigned int tx_set_ic;
        int start_index = ring->cur;
        int cur_index = ring->cur;
        int i;

        DBGPR("-->xgbe_dev_xmit\n");

        csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              CSUM_ENABLE);
        tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                             TSO_ENABLE);
        vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              VLAN_CTAG);

        if (tso && (packet->mss != ring->tx.cur_mss))
                tso_context = 1;
        else
                tso_context = 0;

        if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
                vlan_context = 1;
        else
                vlan_context = 0;

        /* Determine if an interrupt should be generated for this Tx:
         *   Interrupt:
         *     - Tx frame count exceeds the frame count setting
         *     - Addition of Tx frame count to the frame count since the
         *       last interrupt was set exceeds the frame count setting
         *   No interrupt:
         *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
         *     - Addition of Tx frame count to the frame count since the
         *       last interrupt was set does not exceed the frame count setting
         */
        ring->coalesce_count += packet->tx_packets;
        if (!pdata->tx_frames)
                tx_set_ic = 0;
        else if (packet->tx_packets > pdata->tx_frames)
                tx_set_ic = 1;
        else if ((ring->coalesce_count % pdata->tx_frames) <
                 packet->tx_packets)
                tx_set_ic = 1;
        else
                tx_set_ic = 0;
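
        /* Worked example (assumed settings): with tx_frames = 25, a packet
         * carrying tx_packets = 5 that moves coalesce_count from 23 to 28
         * gives 28 % 25 = 3 < 5, so tx_set_ic = 1; the count has crossed a
         * 25-frame boundary since the IC bit was last set, and this
         * descriptor requests an interrupt.
         */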

        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        rdesc = rdata->rdesc;

        /* Create a context descriptor if this is a TSO packet */
        if (tso_context || vlan_context) {
                if (tso_context) {
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "TSO context descriptor, mss=%u\n",
                                  packet->mss);

                        /* Set the MSS size */
                        XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
                                          MSS, packet->mss);

                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          CTXT, 1);

                        /* Indicate this descriptor contains the MSS */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          TCMSSV, 1);

                        ring->tx.cur_mss = packet->mss;
                }

                if (vlan_context) {
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "VLAN context descriptor, ctag=%u\n",
                                  packet->vlan_ctag);

                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          CTXT, 1);

                        /* Set the VLAN tag */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          VT, packet->vlan_ctag);

                        /* Indicate this descriptor contains the VLAN tag */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          VLTV, 1);

                        ring->tx.cur_vlan_ctag = packet->vlan_ctag;
                }

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                rdesc = rdata->rdesc;
        }

        /* Update buffer address (for TSO this is the header) */
        rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
        rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

        /* Update the buffer length */
        XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
                          rdata->skb_dma_len);

        /* VLAN tag insertion check */
        if (vlan)
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
                                  TX_NORMAL_DESC2_VLAN_INSERT);

        /* Timestamp enablement check */
        if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

        /* Mark it as First Descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

        /* Mark it as a NORMAL descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

        /* Set OWN bit if not the first descriptor */
        if (cur_index != start_index)
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

        if (tso) {
                /* Enable TSO */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
1590                                  packet->tcp_payload_len);
1591                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
1592                                  packet->tcp_header_len / 4);
1593
1594                pdata->ext_stats.tx_tso_packets++;
1595        } else {
1596                /* Enable CRC and Pad Insertion (a CPC of 0 selects both) */
1597                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
1598
1599                /* Enable HW CSUM */
1600                if (csum)
1601                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1602                                          CIC, 0x3);
1603
1604                /* Set the total length to be transmitted */
1605                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
1606                                  packet->length);
1607        }
1608
1609        for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
1610                cur_index++;
1611                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1612                rdesc = rdata->rdesc;
1613
1614                /* Update buffer address */
1615                rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
1616                rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
1617
1618                /* Update the buffer length */
1619                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1620                                  rdata->skb_dma_len);
1621
1622                /* Set OWN bit */
1623                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1624
1625                /* Mark it as NORMAL descriptor */
1626                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1627
1628                /* Enable HW CSUM */
1629                if (csum)
1630                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1631                                          CIC, 0x3);
1632        }
1633
1634        /* Set LAST bit for the last descriptor */
1635        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
1636
1637        /* Set IC bit based on Tx coalescing settings */
1638        if (tx_set_ic)
1639                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
1640
1641        /* Save the Tx info to report back during cleanup */
1642        rdata->tx.packets = packet->tx_packets;
1643        rdata->tx.bytes = packet->tx_bytes;
1644
1645        /* In case the Tx DMA engine is running, make sure everything
1646         * is written to the descriptor(s) before setting the OWN bit
1647         * for the first descriptor
1648         */
1649        dma_wmb();
1650
1651        /* Set OWN bit for the first descriptor */
1652        rdata = XGBE_GET_DESC_DATA(ring, start_index);
1653        rdesc = rdata->rdesc;
1654        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1655
1656        if (netif_msg_tx_queued(pdata))
1657                xgbe_dump_tx_desc(pdata, ring, start_index,
1658                                  packet->rdesc_count, 1);
1659
1660        /* Make sure ownership is written to the descriptor */
1661        smp_wmb();
1662
1663        ring->cur = cur_index + 1;
1664        if (!packet->skb->xmit_more ||
1665            netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
1666                                                   channel->queue_index)))
1667                xgbe_tx_start_xmit(channel, ring);
1668        else
1669                ring->tx.xmit_more = 1;
1670
1671        DBGPR("  %s: descriptors %u to %u written\n",
1672              channel->name, start_index & (ring->rdesc_count - 1),
1673              (ring->cur - 1) & (ring->rdesc_count - 1));
1674
1675        DBGPR("<--xgbe_dev_xmit\n");
1676}
1677
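/* Parse the Rx descriptor at ring->cur into ring->packet_data.  Returns 1
 * if the descriptor is still owned by the hardware (no data available yet)
 * and 0 once the descriptor has been consumed.
 */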
1678static int xgbe_dev_read(struct xgbe_channel *channel)
1679{
1680        struct xgbe_prv_data *pdata = channel->pdata;
1681        struct xgbe_ring *ring = channel->rx_ring;
1682        struct xgbe_ring_data *rdata;
1683        struct xgbe_ring_desc *rdesc;
1684        struct xgbe_packet_data *packet = &ring->packet_data;
1685        struct net_device *netdev = pdata->netdev;
1686        unsigned int err, etlt, l34t;
1687
1688        DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
1689
1690        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1691        rdesc = rdata->rdesc;
1692
1693        /* Check for data availability */
1694        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
1695                return 1;
1696
1697        /* Make sure descriptor fields are read after reading the OWN bit */
1698        dma_rmb();
1699
1700        if (netif_msg_rx_status(pdata))
1701                xgbe_dump_rx_desc(pdata, ring, ring->cur);
1702
1703        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
1704                /* Timestamp Context Descriptor */
1705                xgbe_get_rx_tstamp(packet, rdesc);
1706
1707                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1708                               CONTEXT, 1);
1709                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1710                               CONTEXT_NEXT, 0);
1711                return 0;
1712        }
1713
1714        /* Normal Descriptor, be sure Context Descriptor bit is off */
1715        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
1716
1717        /* Indicate if a Context Descriptor is next */
1718        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
1719                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1720                               CONTEXT_NEXT, 1);
1721
1722        /* Get the header length */
1723        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
1724                rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
1725                                                      RX_NORMAL_DESC2, HL);
1726                if (rdata->rx.hdr_len)
1727                        pdata->ext_stats.rx_split_header_packets++;
1728        }
1729
1730        /* Get the RSS hash */
1731        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
1732                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1733                               RSS_HASH, 1);
1734
1735                packet->rss_hash = le32_to_cpu(rdesc->desc1);
1736
1737                l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
1738                switch (l34t) {
1739                case RX_DESC3_L34T_IPV4_TCP:
1740                case RX_DESC3_L34T_IPV4_UDP:
1741                case RX_DESC3_L34T_IPV6_TCP:
1742                case RX_DESC3_L34T_IPV6_UDP:
1743                        packet->rss_hash_type = PKT_HASH_TYPE_L4;
1744                        break;
1745                default:
1746                        packet->rss_hash_type = PKT_HASH_TYPE_L3;
1747                }
1748        }
1749
1750        /* Get the packet length */
1751        rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1752
1753        if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
1754                /* Not all the data has been transferred for this packet */
1755                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1756                               INCOMPLETE, 1);
1757                return 0;
1758        }
1759
1760        /* This is the last of the data for this packet */
1761        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1762                       INCOMPLETE, 0);
1763
1764        /* Set checksum done indicator as appropriate */
1765        if (netdev->features & NETIF_F_RXCSUM)
1766                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1767                               CSUM_DONE, 1);
1768
1769        /* Check for errors (only valid in last descriptor) */
1770        err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
1771        etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
1772        netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
1773
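        /* An ETLT of 0x09 identifies a VLAN-tagged frame; 0x05 and 0x06 are
         * checksum errors, which only clear CSUM_DONE instead of counting a
         * frame error.
         */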
1774        if (!err || !etlt) {
1775                /* No error if err is 0 or etlt is 0 */
1776                if ((etlt == 0x09) &&
1777                    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1778                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1779                                       VLAN_CTAG, 1);
1780                        packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
1781                                                              RX_NORMAL_DESC0,
1782                                                              OVT);
1783                        netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
1784                                  packet->vlan_ctag);
1785                }
1786        } else {
1787                if ((etlt == 0x05) || (etlt == 0x06))
1788                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1789                                       CSUM_DONE, 0);
1790                else
1791                        XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
1792                                       FRAME, 1);
1793        }
1794
1795        DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
1796              ring->cur & (ring->rdesc_count - 1), ring->cur);
1797
1798        return 0;
1799}
1800
1801static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
1802{
1803        /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1804        return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
1805}
1806
1807static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
1808{
1809        /* Rx and Tx share LD bit, so check TDES3.LD bit */
1810        return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
1811}
1812
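/* Enable one interrupt source in the channel's DMA_CH_IER.  For
 * XGMAC_INT_DMA_ALL, restore the enables that a previous call to
 * xgbe_disable_int(channel, XGMAC_INT_DMA_ALL) saved in saved_ier.
 */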
1813static int xgbe_enable_int(struct xgbe_channel *channel,
1814                           enum xgbe_int int_id)
1815{
1816        unsigned int dma_ch_ier;
1817
1818        dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1819
1820        switch (int_id) {
1821        case XGMAC_INT_DMA_CH_SR_TI:
1822                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
1823                break;
1824        case XGMAC_INT_DMA_CH_SR_TPS:
1825                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
1826                break;
1827        case XGMAC_INT_DMA_CH_SR_TBU:
1828                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
1829                break;
1830        case XGMAC_INT_DMA_CH_SR_RI:
1831                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
1832                break;
1833        case XGMAC_INT_DMA_CH_SR_RBU:
1834                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
1835                break;
1836        case XGMAC_INT_DMA_CH_SR_RPS:
1837                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
1838                break;
1839        case XGMAC_INT_DMA_CH_SR_TI_RI:
1840                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
1841                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
1842                break;
1843        case XGMAC_INT_DMA_CH_SR_FBE:
1844                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
1845                break;
1846        case XGMAC_INT_DMA_ALL:
1847                dma_ch_ier |= channel->saved_ier;
1848                break;
1849        default:
1850                return -1;
1851        }
1852
1853        XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
1854
1855        return 0;
1856}
1857
1858static int xgbe_disable_int(struct xgbe_channel *channel,
1859                            enum xgbe_int int_id)
1860{
1861        unsigned int dma_ch_ier;
1862
1863        dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
1864
1865        switch (int_id) {
1866        case XGMAC_INT_DMA_CH_SR_TI:
1867                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
1868                break;
1869        case XGMAC_INT_DMA_CH_SR_TPS:
1870                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
1871                break;
1872        case XGMAC_INT_DMA_CH_SR_TBU:
1873                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
1874                break;
1875        case XGMAC_INT_DMA_CH_SR_RI:
1876                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
1877                break;
1878        case XGMAC_INT_DMA_CH_SR_RBU:
1879                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
1880                break;
1881        case XGMAC_INT_DMA_CH_SR_RPS:
1882                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
1883                break;
1884        case XGMAC_INT_DMA_CH_SR_TI_RI:
1885                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
1886                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
1887                break;
1888        case XGMAC_INT_DMA_CH_SR_FBE:
1889                XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
1890                break;
1891        case XGMAC_INT_DMA_ALL:
1892                channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
1893                dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
1894                break;
1895        default:
1896                return -1;
1897        }
1898
1899        XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
1900
1901        return 0;
1902}
1903
1904static int xgbe_exit(struct xgbe_prv_data *pdata)
1905{
1906        unsigned int count = 2000;
1907
1908        DBGPR("-->xgbe_exit\n");
1909
1910        /* Issue a software reset */
1911        XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
1912        usleep_range(10, 15);
1913
1914        /* Poll until the software reset completes */
1915        while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
1916                usleep_range(500, 600);
1917
1918        if (!count)
1919                return -EBUSY;
1920
1921        DBGPR("<--xgbe_exit\n");
1922
1923        return 0;
1924}
1925
1926static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
1927{
1928        unsigned int i, count;
1929
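        /* Tx queue flushing is only performed on hardware version 2.1 and
         * later (SNPSVER >= 0x21)
         */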
1930        if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
1931                return 0;
1932
1933        for (i = 0; i < pdata->tx_q_count; i++)
1934                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
1935
1936        /* Poll until the flush completes on each queue */
1937        for (i = 0; i < pdata->tx_q_count; i++) {
1938                count = 2000;
1939                while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
1940                                                        MTL_Q_TQOMR, FTQ))
1941                        usleep_range(500, 600);
1942
1943                if (!count)
1944                        return -EBUSY;
1945        }
1946
1947        return 0;
1948}
1949
1950static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
1951{
1952        /* Set enhanced addressing mode */
1953        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
1954
1955        /* Set the System Bus mode: undefined burst length, up to 256 beats */
1956        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
1957        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
1958}
1959
1960static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
1961{
1962        unsigned int arcache, awcache;
1963
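        /* Program the AXI cache and shareability-domain attributes used for
         * the device's DMA reads (ARCACHE) and writes (AWCACHE) from the
         * platform-supplied arcache/awcache/axdomain values.
         */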
1964        arcache = 0;
1965        XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
1966        XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
1967        XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
1968        XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
1969        XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
1970        XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
1971        XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
1972
1973        awcache = 0;
1974        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
1975        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
1976        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
1977        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
1978        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
1979        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
1980        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
1981        XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
1982        XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
1983}
1984
1985static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
1986{
1987        unsigned int i;
1988
1989        /* Set Tx to weighted round robin scheduling algorithm */
1990        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
1991
1992        /* Set Tx traffic classes to use WRR algorithm with equal weights */
1993        for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
1994                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
1995                                       MTL_TSA_ETS);
1996                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
1997        }
1998
1999        /* Set Rx to strict priority algorithm */
2000        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
2001}
2002
2003static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
2004                                                  unsigned int queue_count)
2005{
2006        unsigned int q_fifo_size;
2007        unsigned int p_fifo;
2008
2009        /* Calculate the configured fifo size */
2010        q_fifo_size = 1 << (fifo_size + 7);
2011
2012        /* The configured value may not be the actual amount of fifo RAM */
2013        q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
2014
2015        q_fifo_size = q_fifo_size / queue_count;
2016
2017        /* Each increment in the queue fifo size represents 256 bytes of
2018         * fifo, with 0 representing 256 bytes. Distribute the fifo equally
2019         * between the queues.
2020         */
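        /* Illustrative example: fifo_size = 9 gives 1 << 16 = 64KB; split
         * across 8 queues that is 8KB per queue, encoded as
         * p_fifo = 8192 / 256 - 1 = 31.
         */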
2021        p_fifo = q_fifo_size / 256;
2022        if (p_fifo)
2023                p_fifo--;
2024
2025        return p_fifo;
2026}
2027
2028static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
2029{
2030        unsigned int fifo_size;
2031        unsigned int i;
2032
2033        fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
2034                                                  pdata->tx_q_count);
2035
2036        for (i = 0; i < pdata->tx_q_count; i++)
2037                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
2038
2039        netif_info(pdata, drv, pdata->netdev,
2040                   "%d Tx hardware queues, %d byte fifo per queue\n",
2041                   pdata->tx_q_count, ((fifo_size + 1) * 256));
2042}
2043
2044static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
2045{
2046        unsigned int fifo_size;
2047        unsigned int i;
2048
2049        fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
2050                                                  pdata->rx_q_count);
2051
2052        for (i = 0; i < pdata->rx_q_count; i++)
2053                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
2054
2055        netif_info(pdata, drv, pdata->netdev,
2056                   "%d Rx hardware queues, %d byte fifo per queue\n",
2057                   pdata->rx_q_count, ((fifo_size + 1) * 256));
2058}
2059
2060static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
2061{
2062        unsigned int qptc, qptc_extra, queue;
2063        unsigned int prio_queues;
2064        unsigned int ppq, ppq_extra, prio;
2065        unsigned int mask;
2066        unsigned int i, j, reg, reg_val;
2067
2068        /* Map the MTL Tx Queues to Traffic Classes
2069         *   Note: Tx Queues >= Traffic Classes
2070         */
2071        qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
2072        qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
2073
2074        for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
2075                for (j = 0; j < qptc; j++) {
2076                        netif_dbg(pdata, drv, pdata->netdev,
2077                                  "TXq%u mapped to TC%u\n", queue, i);
2078                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
2079                                               Q2TCMAP, i);
2080                        pdata->q2tc_map[queue++] = i;
2081                }
2082
2083                if (i < qptc_extra) {
2084                        netif_dbg(pdata, drv, pdata->netdev,
2085                                  "TXq%u mapped to TC%u\n", queue, i);
2086                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
2087                                               Q2TCMAP, i);
2088                        pdata->q2tc_map[queue++] = i;
2089                }
2090        }
2091
2092        /* Map the 8 VLAN priority values to available MTL Rx queues */
2093        prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
2094                            pdata->rx_q_count);
2095        ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
2096        ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
2097
2098        reg = MAC_RQC2R;
2099        reg_val = 0;
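        /* Each MAC_RQC2 register packs an 8-bit priority mask per queue,
         * MAC_RQC2_Q_PER_REG queues to a register; the register is written
         * out once it is full or the last queue has been mapped.
         */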
2100        for (i = 0, prio = 0; i < prio_queues;) {
2101                mask = 0;
2102                for (j = 0; j < ppq; j++) {
2103                        netif_dbg(pdata, drv, pdata->netdev,
2104                                  "PRIO%u mapped to RXq%u\n", prio, i);
2105                        mask |= (1 << prio);
2106                        pdata->prio2q_map[prio++] = i;
2107                }
2108
2109                if (i < ppq_extra) {
2110                        netif_dbg(pdata, drv, pdata->netdev,
2111                                  "PRIO%u mapped to RXq%u\n", prio, i);
2112                        mask |= (1 << prio);
2113                        pdata->prio2q_map[prio++] = i;
2114                }
2115
2116                reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
2117
2118                if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
2119                        continue;
2120
2121                XGMAC_IOWRITE(pdata, reg, reg_val);
2122                reg += MAC_RQC2_INC;
2123                reg_val = 0;
2124        }
2125
2126        /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
2127        reg = MTL_RQDCM0R;
2128        reg_val = 0;
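        /* Setting 0x80 in a queue's byte turns on its dynamic-mapping bit */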
2129        for (i = 0; i < pdata->rx_q_count;) {
2130                reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
2131
2132                if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
2133                        continue;
2134
2135                XGMAC_IOWRITE(pdata, reg, reg_val);
2136
2137                reg += MTL_RQDCM_INC;
2138                reg_val = 0;
2139        }
2140}
2141
2142static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
2143{
2144        unsigned int i;
2145
2146        for (i = 0; i < pdata->rx_q_count; i++) {
2147                /* Activate flow control when less than 4k left in fifo */
2148                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
2149
2150                /* De-activate flow control when more than 6k left in fifo */
2151                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
2152        }
2153}
2154
2155static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
2156{
2157        xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
2158
2159        /* Filtering is done using perfect filtering and hash filtering */
2160        if (pdata->hw_feat.hash_table_size) {
2161                XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
2162                XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
2163                XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
2164        }
2165}
2166
2167static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
2168{
2169        unsigned int val;
2170
2171        val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
2172
2173        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
2174}
2175
2176static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
2177{
2178        switch (pdata->phy_speed) {
2179        case SPEED_10000:
2180                xgbe_set_xgmii_speed(pdata);
2181                break;
2182
2183        case SPEED_2500:
2184                xgbe_set_gmii_2500_speed(pdata);
2185                break;
2186
2187        case SPEED_1000:
2188                xgbe_set_gmii_speed(pdata);
2189                break;
2190        }
2191}
2192
2193static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
2194{
2195        if (pdata->netdev->features & NETIF_F_RXCSUM)
2196                xgbe_enable_rx_csum(pdata);
2197        else
2198                xgbe_disable_rx_csum(pdata);
2199}
2200
2201static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
2202{
2203        /* Indicate that VLAN Tx CTAGs come from context descriptors */
2204        XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
2205        XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
2206
2207        /* Set the current VLAN Hash Table register value */
2208        xgbe_update_vlan_hash_table(pdata);
2209
2210        if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
2211                xgbe_enable_rx_vlan_filtering(pdata);
2212        else
2213                xgbe_disable_rx_vlan_filtering(pdata);
2214
2215        if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2216                xgbe_enable_rx_vlan_stripping(pdata);
2217        else
2218                xgbe_disable_rx_vlan_stripping(pdata);
2219}
2220
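/* Read an MMC counter.  The four octet counters are 64 bits wide and fold
 * in the adjacent high register (reg_lo + 4); all other counters are 32
 * bits.  With reset-on-read enabled each read returns the delta since the
 * previous read, which is why all callers accumulate with +=.
 */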
2221static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
2222{
2223        bool read_hi;
2224        u64 val;
2225
2226        switch (reg_lo) {
2227        /* These registers are always 64 bit */
2228        case MMC_TXOCTETCOUNT_GB_LO:
2229        case MMC_TXOCTETCOUNT_G_LO:
2230        case MMC_RXOCTETCOUNT_GB_LO:
2231        case MMC_RXOCTETCOUNT_G_LO:
2232                read_hi = true;
2233                break;
2234
2235        default:
2236                read_hi = false;
2237        }
2238
2239        val = XGMAC_IOREAD(pdata, reg_lo);
2240
2241        if (read_hi)
2242                val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
2243
2244        return val;
2245}
2246
2247static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
2248{
2249        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2250        unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
2251
2252        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
2253                stats->txoctetcount_gb +=
2254                        xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
2255
2256        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
2257                stats->txframecount_gb +=
2258                        xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
2259
2260        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
2261                stats->txbroadcastframes_g +=
2262                        xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
2263
2264        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
2265                stats->txmulticastframes_g +=
2266                        xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
2267
2268        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
2269                stats->tx64octets_gb +=
2270                        xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
2271
2272        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
2273                stats->tx65to127octets_gb +=
2274                        xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
2275
2276        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
2277                stats->tx128to255octets_gb +=
2278                        xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
2279
2280        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
2281                stats->tx256to511octets_gb +=
2282                        xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
2283
2284        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
2285                stats->tx512to1023octets_gb +=
2286                        xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
2287
2288        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
2289                stats->tx1024tomaxoctets_gb +=
2290                        xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
2291
2292        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
2293                stats->txunicastframes_gb +=
2294                        xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
2295
2296        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
2297                stats->txmulticastframes_gb +=
2298                        xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
2299
2300        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
2301                stats->txbroadcastframes_g +=
2302                        xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
2303
2304        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
2305                stats->txunderflowerror +=
2306                        xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
2307
2308        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
2309                stats->txoctetcount_g +=
2310                        xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
2311
2312        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
2313                stats->txframecount_g +=
2314                        xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
2315
2316        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
2317                stats->txpauseframes +=
2318                        xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
2319
2320        if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
2321                stats->txvlanframes_g +=
2322                        xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
2323}
2324
2325static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
2326{
2327        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2328        unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
2329
2330        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
2331                stats->rxframecount_gb +=
2332                        xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
2333
2334        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
2335                stats->rxoctetcount_gb +=
2336                        xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
2337
2338        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
2339                stats->rxoctetcount_g +=
2340                        xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
2341
2342        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
2343                stats->rxbroadcastframes_g +=
2344                        xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
2345
2346        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
2347                stats->rxmulticastframes_g +=
2348                        xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
2349
2350        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
2351                stats->rxcrcerror +=
2352                        xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
2353
2354        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
2355                stats->rxrunterror +=
2356                        xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
2357
2358        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
2359                stats->rxjabbererror +=
2360                        xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
2361
2362        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
2363                stats->rxundersize_g +=
2364                        xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
2365
2366        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
2367                stats->rxoversize_g +=
2368                        xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
2369
2370        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
2371                stats->rx64octets_gb +=
2372                        xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
2373
2374        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
2375                stats->rx65to127octets_gb +=
2376                        xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
2377
2378        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
2379                stats->rx128to255octets_gb +=
2380                        xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
2381
2382        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
2383                stats->rx256to511octets_gb +=
2384                        xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
2385
2386        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
2387                stats->rx512to1023octets_gb +=
2388                        xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
2389
2390        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
2391                stats->rx1024tomaxoctets_gb +=
2392                        xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
2393
2394        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
2395                stats->rxunicastframes_g +=
2396                        xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
2397
2398        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
2399                stats->rxlengtherror +=
2400                        xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
2401
2402        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
2403                stats->rxoutofrangetype +=
2404                        xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
2405
2406        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
2407                stats->rxpauseframes +=
2408                        xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
2409
2410        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
2411                stats->rxfifooverflow +=
2412                        xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
2413
2414        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
2415                stats->rxvlanframes_gb +=
2416                        xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2417
2418        if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
2419                stats->rxwatchdogerror +=
2420                        xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
2421}
2422
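/* Snapshot every MMC counter into pdata->mmc_stats.  The counters are
 * frozen for the duration of the reads so that the Tx and Rx statistics
 * remain consistent with one another.
 */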
2423static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
2424{
2425        struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2426
2427        /* Freeze counters */
2428        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
2429
2430        stats->txoctetcount_gb +=
2431                xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
2432
2433        stats->txframecount_gb +=
2434                xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
2435
2436        stats->txbroadcastframes_g +=
2437                xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
2438
2439        stats->txmulticastframes_g +=
2440                xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
2441
2442        stats->tx64octets_gb +=
2443                xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
2444
2445        stats->tx65to127octets_gb +=
2446                xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
2447
2448        stats->tx128to255octets_gb +=
2449                xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
2450
2451        stats->tx256to511octets_gb +=
2452                xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
2453
2454        stats->tx512to1023octets_gb +=
2455                xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
2456
2457        stats->tx1024tomaxoctets_gb +=
2458                xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
2459
2460        stats->txunicastframes_gb +=
2461                xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
2462
2463        stats->txmulticastframes_gb +=
2464                xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
2465
2466        stats->txbroadcastframes_g +=
2467                xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
2468
2469        stats->txunderflowerror +=
2470                xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
2471
2472        stats->txoctetcount_g +=
2473                xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
2474
2475        stats->txframecount_g +=
2476                xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
2477
2478        stats->txpauseframes +=
2479                xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
2480
2481        stats->txvlanframes_g +=
2482                xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
2483
2484        stats->rxframecount_gb +=
2485                xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
2486
2487        stats->rxoctetcount_gb +=
2488                xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
2489
2490        stats->rxoctetcount_g +=
2491                xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
2492
2493        stats->rxbroadcastframes_g +=
2494                xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
2495
2496        stats->rxmulticastframes_g +=
2497                xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
2498
2499        stats->rxcrcerror +=
2500                xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
2501
2502        stats->rxrunterror +=
2503                xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
2504
2505        stats->rxjabbererror +=
2506                xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
2507
2508        stats->rxundersize_g +=
2509                xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
2510
2511        stats->rxoversize_g +=
2512                xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
2513
2514        stats->rx64octets_gb +=
2515                xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
2516
2517        stats->rx65to127octets_gb +=
2518                xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
2519
2520        stats->rx128to255octets_gb +=
2521                xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
2522
2523        stats->rx256to511octets_gb +=
2524                xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
2525
2526        stats->rx512to1023octets_gb +=
2527                xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
2528
2529        stats->rx1024tomaxoctets_gb +=
2530                xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
2531
2532        stats->rxunicastframes_g +=
2533                xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
2534
2535        stats->rxlengtherror +=
2536                xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
2537
2538        stats->rxoutofrangetype +=
2539                xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
2540
2541        stats->rxpauseframes +=
2542                xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
2543
2544        stats->rxfifooverflow +=
2545                xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
2546
2547        stats->rxvlanframes_gb +=
2548                xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2549
2550        stats->rxwatchdogerror +=
2551                xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
2552
2553        /* Un-freeze counters */
2554        XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
2555}
2556
2557static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
2558{
2559        /* Set counters to reset on read */
2560        XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
2561
2562        /* Reset the counters */
2563        XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
2564}
2565
2566static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
2567                                 struct xgbe_channel *channel)
2568{
2569        unsigned int tx_dsr, tx_pos, tx_qidx;
2570        unsigned int tx_status;
2571        unsigned long tx_timeout;
2572
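        /* Tx status for the first DMA_DSRX_FIRST_QUEUE channels lives in
         * DMA_DSR0; later channels are packed DMA_DSRX_QPR to a register
         * starting at DMA_DSR1.
         */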
2573        /* Calculate the status register to read and the position within */
2574        if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
2575                tx_dsr = DMA_DSR0;
2576                tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
2577                         DMA_DSR0_TPS_START;
2578        } else {
2579                tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
2580
2581                tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
2582                tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
2583                         DMA_DSRX_TPS_START;
2584        }
2585
2586        /* The Tx engine cannot be stopped if it is actively processing
2587         * descriptors. Wait for the Tx engine to enter the stopped or
2588         * suspended state.  Don't wait forever though...
2589         */
2590        tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
2591        while (time_before(jiffies, tx_timeout)) {
2592                tx_status = XGMAC_IOREAD(pdata, tx_dsr);
2593                tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
2594                if ((tx_status == DMA_TPS_STOPPED) ||
2595                    (tx_status == DMA_TPS_SUSPENDED))
2596                        break;
2597
2598                usleep_range(500, 1000);
2599        }
2600
2601        if (!time_before(jiffies, tx_timeout))
2602                netdev_info(pdata->netdev,
2603                            "timed out waiting for Tx DMA channel %u to stop\n",
2604                            channel->queue_index);
2605}
2606
2607static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
2608{
2609        struct xgbe_channel *channel;
2610        unsigned int i;
2611
2612        /* Enable each Tx DMA channel */
2613        channel = pdata->channel;
2614        for (i = 0; i < pdata->channel_count; i++, channel++) {
2615                if (!channel->tx_ring)
2616                        break;
2617
2618                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
2619        }
2620
2621        /* Enable each Tx queue */
2622        for (i = 0; i < pdata->tx_q_count; i++)
2623                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
2624                                       MTL_Q_ENABLED);
2625
2626        /* Enable MAC Tx */
2627        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
2628}
2629
2630static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
2631{
2632        struct xgbe_channel *channel;
2633        unsigned int i;
2634
2635        /* Prepare for Tx DMA channel stop */
2636        channel = pdata->channel;
2637        for (i = 0; i < pdata->channel_count; i++, channel++) {
2638                if (!channel->tx_ring)
2639                        break;
2640
2641                xgbe_prepare_tx_stop(pdata, channel);
2642        }
2643
2644        /* Disable MAC Tx */
2645        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
2646
2647        /* Disable each Tx queue */
2648        for (i = 0; i < pdata->tx_q_count; i++)
2649                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
2650
2651        /* Disable each Tx DMA channel */
2652        channel = pdata->channel;
2653        for (i = 0; i < pdata->channel_count; i++, channel++) {
2654                if (!channel->tx_ring)
2655                        break;
2656
2657                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
2658        }
2659}
2660
2661static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
2662                                 unsigned int queue)
2663{
2664        unsigned int rx_status;
2665        unsigned long rx_timeout;
2666
2667        /* The Rx engine cannot be stopped if it is actively processing
2668         * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
2669         * wait forever though...
2670         */
2671        rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
2672        while (time_before(jiffies, rx_timeout)) {
2673                rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
2674                if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
2675                    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
2676                        break;
2677
2678                usleep_range(500, 1000);
2679        }
2680
2681        if (!time_before(jiffies, rx_timeout))
2682                netdev_info(pdata->netdev,
2683                            "timed out waiting for Rx queue %u to empty\n",
2684                            queue);
2685}
2686
2687static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
2688{
2689        struct xgbe_channel *channel;
2690        unsigned int reg_val, i;
2691
2692        /* Enable each Rx DMA channel */
2693        channel = pdata->channel;
2694        for (i = 0; i < pdata->channel_count; i++, channel++) {
2695                if (!channel->rx_ring)
2696                        break;
2697
2698                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
2699        }
2700
2701        /* Enable each Rx queue (0x2 in each 2-bit field = DCB/generic) */
2702        reg_val = 0;
2703        for (i = 0; i < pdata->rx_q_count; i++)
2704                reg_val |= (0x02 << (i << 1));
2705        XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
2706
2707        /* Enable MAC Rx */
2708        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
2709        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
2710        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
2711        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
2712}
2713
2714static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
2715{
2716        struct xgbe_channel *channel;
2717        unsigned int i;
2718
2719        /* Disable MAC Rx */
2720        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
2721        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
2722        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
2723        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
2724
2725        /* Prepare for Rx DMA channel stop */
2726        for (i = 0; i < pdata->rx_q_count; i++)
2727                xgbe_prepare_rx_stop(pdata, i);
2728
2729        /* Disable each Rx queue */
2730        XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
2731
2732        /* Disable each Rx DMA channel */
2733        channel = pdata->channel;
2734        for (i = 0; i < pdata->channel_count; i++, channel++) {
2735                if (!channel->rx_ring)
2736                        break;
2737
2738                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
2739        }
2740}
2741
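/* The powerup/powerdown variants below differ from enable/disable in that
 * they leave the MTL queue configuration untouched: only the DMA channels
 * (and, for Tx, the MAC transmit enable) are toggled.
 */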
2742static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
2743{
2744        struct xgbe_channel *channel;
2745        unsigned int i;
2746
2747        /* Enable each Tx DMA channel */
2748        channel = pdata->channel;
2749        for (i = 0; i < pdata->channel_count; i++, channel++) {
2750                if (!channel->tx_ring)
2751                        break;
2752
2753                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
2754        }
2755
2756        /* Enable MAC Tx */
2757        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
2758}
2759
2760static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
2761{
2762        struct xgbe_channel *channel;
2763        unsigned int i;
2764
2765        /* Prepare for Tx DMA channel stop */
2766        channel = pdata->channel;
2767        for (i = 0; i < pdata->channel_count; i++, channel++) {
2768                if (!channel->tx_ring)
2769                        break;
2770
2771                xgbe_prepare_tx_stop(pdata, channel);
2772        }
2773
2774        /* Disable MAC Tx */
2775        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
2776
2777        /* Disable each Tx DMA channel */
2778        channel = pdata->channel;
2779        for (i = 0; i < pdata->channel_count; i++, channel++) {
2780                if (!channel->tx_ring)
2781                        break;
2782
2783                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
2784        }
2785}
2786
2787static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
2788{
2789        struct xgbe_channel *channel;
2790        unsigned int i;
2791
2792        /* Enable each Rx DMA channel */
2793        channel = pdata->channel;
2794        for (i = 0; i < pdata->channel_count; i++, channel++) {
2795                if (!channel->rx_ring)
2796                        break;
2797
2798                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
2799        }
2800}
2801
2802static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
2803{
2804        struct xgbe_channel *channel;
2805        unsigned int i;
2806
2807        /* Disable each Rx DMA channel */
2808        channel = pdata->channel;
2809        for (i = 0; i < pdata->channel_count; i++, channel++) {
2810                if (!channel->rx_ring)
2811                        break;
2812
2813                XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
2814        }
2815}
2816
2817static int xgbe_init(struct xgbe_prv_data *pdata)
2818{
2819        struct xgbe_desc_if *desc_if = &pdata->desc_if;
2820        int ret;
2821
2822        DBGPR("-->xgbe_init\n");
2823
2824        /* Flush Tx queues */
2825        ret = xgbe_flush_tx_queues(pdata);
2826        if (ret)
2827                return ret;
2828
2829        /*
2830         * Initialize DMA related features
2831         */
2832        xgbe_config_dma_bus(pdata);
2833        xgbe_config_dma_cache(pdata);
2834        xgbe_config_osp_mode(pdata);
2835        xgbe_config_pblx8(pdata);
2836        xgbe_config_tx_pbl_val(pdata);
2837        xgbe_config_rx_pbl_val(pdata);
2838        xgbe_config_rx_coalesce(pdata);
2839        xgbe_config_tx_coalesce(pdata);
2840        xgbe_config_rx_buffer_size(pdata);
2841        xgbe_config_tso_mode(pdata);
2842        xgbe_config_sph_mode(pdata);
2843        xgbe_config_rss(pdata);
2844        desc_if->wrapper_tx_desc_init(pdata);
2845        desc_if->wrapper_rx_desc_init(pdata);
2846        xgbe_enable_dma_interrupts(pdata);
2847
2848        /*
2849         * Initialize MTL related features
2850         */
2851        xgbe_config_mtl_mode(pdata);
2852        xgbe_config_queue_mapping(pdata);
2853        xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
2854        xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
2855        xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
2856        xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
2857        xgbe_config_tx_fifo_size(pdata);
2858        xgbe_config_rx_fifo_size(pdata);
2859        xgbe_config_flow_control_threshold(pdata);
2860        /* TODO: enable forwarding of error packets and undersized good
2861         *       packets (FEP and FUP)
2862         */
2863        xgbe_config_dcb_tc(pdata);
2864        xgbe_config_dcb_pfc(pdata);
2865        xgbe_enable_mtl_interrupts(pdata);
2866
2867        /*
2868         * Initialize MAC related features
2869         */
2870        xgbe_config_mac_address(pdata);
2871        xgbe_config_rx_mode(pdata);
2872        xgbe_config_jumbo_enable(pdata);
2873        xgbe_config_flow_control(pdata);
2874        xgbe_config_mac_speed(pdata);
2875        xgbe_config_checksum_offload(pdata);
2876        xgbe_config_vlan_support(pdata);
2877        xgbe_config_mmc(pdata);
2878        xgbe_enable_mac_interrupts(pdata);
2879
2880        DBGPR("<--xgbe_init\n");
2881
2882        return 0;
2883}
2884
2885void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
2886{
2887        DBGPR("-->xgbe_init_function_ptrs\n");
2888
2889        hw_if->tx_complete = xgbe_tx_complete;
2890
2891        hw_if->set_mac_address = xgbe_set_mac_address;
2892        hw_if->config_rx_mode = xgbe_config_rx_mode;
2893
2894        hw_if->enable_rx_csum = xgbe_enable_rx_csum;
2895        hw_if->disable_rx_csum = xgbe_disable_rx_csum;
2896
2897        hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
2898        hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
2899        hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
2900        hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
2901        hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
2902
2903        hw_if->read_mmd_regs = xgbe_read_mmd_regs;
2904        hw_if->write_mmd_regs = xgbe_write_mmd_regs;
2905
2906        hw_if->set_gmii_speed = xgbe_set_gmii_speed;
2907        hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
2908        hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
2909
2910        hw_if->enable_tx = xgbe_enable_tx;
2911        hw_if->disable_tx = xgbe_disable_tx;
2912        hw_if->enable_rx = xgbe_enable_rx;
2913        hw_if->disable_rx = xgbe_disable_rx;
2914
2915        hw_if->powerup_tx = xgbe_powerup_tx;
2916        hw_if->powerdown_tx = xgbe_powerdown_tx;
2917        hw_if->powerup_rx = xgbe_powerup_rx;
2918        hw_if->powerdown_rx = xgbe_powerdown_rx;
2919
2920        hw_if->dev_xmit = xgbe_dev_xmit;
2921        hw_if->dev_read = xgbe_dev_read;
2922        hw_if->enable_int = xgbe_enable_int;
2923        hw_if->disable_int = xgbe_disable_int;
2924        hw_if->init = xgbe_init;
2925        hw_if->exit = xgbe_exit;
2926
2927        /* Descriptor related sequences have to be initialized here */
2928        hw_if->tx_desc_init = xgbe_tx_desc_init;
2929        hw_if->rx_desc_init = xgbe_rx_desc_init;
2930        hw_if->tx_desc_reset = xgbe_tx_desc_reset;
2931        hw_if->rx_desc_reset = xgbe_rx_desc_reset;
2932        hw_if->is_last_desc = xgbe_is_last_desc;
2933        hw_if->is_context_desc = xgbe_is_context_desc;
2934        hw_if->tx_start_xmit = xgbe_tx_start_xmit;
2935
2936        /* For flow control */
2937        hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
2938        hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
2939
2940        /* For RX coalescing */
2941        hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
2942        hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
2943        hw_if->usec_to_riwt = xgbe_usec_to_riwt;
2944        hw_if->riwt_to_usec = xgbe_riwt_to_usec;
2945
2946        /* For RX and TX threshold config */
2947        hw_if->config_rx_threshold = xgbe_config_rx_threshold;
2948        hw_if->config_tx_threshold = xgbe_config_tx_threshold;
2949
2950        /* For RX and TX Store and Forward Mode config */
2951        hw_if->config_rsf_mode = xgbe_config_rsf_mode;
2952        hw_if->config_tsf_mode = xgbe_config_tsf_mode;
2953
2954        /* For TX DMA Operating on Second Frame config */
2955        hw_if->config_osp_mode = xgbe_config_osp_mode;
2956
2957        /* For RX and TX PBL config */
2958        hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
2959        hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
2960        hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
2961        hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
2962        hw_if->config_pblx8 = xgbe_config_pblx8;
2963
2964        /* For MMC statistics support */
2965        hw_if->tx_mmc_int = xgbe_tx_mmc_int;
2966        hw_if->rx_mmc_int = xgbe_rx_mmc_int;
2967        hw_if->read_mmc_stats = xgbe_read_mmc_stats;
2968
2969        /* For PTP config */
2970        hw_if->config_tstamp = xgbe_config_tstamp;
2971        hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
2972        hw_if->set_tstamp_time = xgbe_set_tstamp_time;
2973        hw_if->get_tstamp_time = xgbe_get_tstamp_time;
2974        hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
2975
2976        /* For Data Center Bridging config */
2977        hw_if->config_tc = xgbe_config_tc;
2978        hw_if->config_dcb_tc = xgbe_config_dcb_tc;
2979        hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
2980
2981        /* For Receive Side Scaling */
2982        hw_if->enable_rss = xgbe_enable_rss;
2983        hw_if->disable_rss = xgbe_disable_rss;
2984        hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
2985        hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
2986
2987        DBGPR("<--xgbe_init_function_ptrs\n");
2988}
2989