linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
   1/*******************************************************************************
   2  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
   3  ST Ethernet IPs are built around a Synopsys IP Core.
   4
   5        Copyright(C) 2007-2011 STMicroelectronics Ltd
   6
   7  This program is free software; you can redistribute it and/or modify it
   8  under the terms and conditions of the GNU General Public License,
   9  version 2, as published by the Free Software Foundation.
  10
  11  This program is distributed in the hope it will be useful, but WITHOUT
  12  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14  more details.
  15
  16  The full GNU General Public License is included in this distribution in
  17  the file called "COPYING".
  18
  19  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
  20
  21  Documentation available at:
  22        http://www.stlinux.com
  23  Support available at:
  24        https://bugzilla.stlinux.com/
  25*******************************************************************************/
  26
  27#include <linux/clk.h>
  28#include <linux/kernel.h>
  29#include <linux/interrupt.h>
  30#include <linux/ip.h>
  31#include <linux/tcp.h>
  32#include <linux/skbuff.h>
  33#include <linux/ethtool.h>
  34#include <linux/if_ether.h>
  35#include <linux/crc32.h>
  36#include <linux/mii.h>
  37#include <linux/if.h>
  38#include <linux/if_vlan.h>
  39#include <linux/dma-mapping.h>
  40#include <linux/slab.h>
  41#include <linux/prefetch.h>
  42#include <linux/pinctrl/consumer.h>
  43#ifdef CONFIG_DEBUG_FS
  44#include <linux/debugfs.h>
  45#include <linux/seq_file.h>
  46#endif /* CONFIG_DEBUG_FS */
  47#include <linux/net_tstamp.h>
  48#include <net/pkt_cls.h>
  49#include "stmmac_ptp.h"
  50#include "stmmac.h"
  51#include <linux/reset.h>
  52#include <linux/of_mdio.h>
  53#include "dwmac1000.h"
  54#include "hwif.h"
  55
  56#define STMMAC_ALIGN(x)         __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
  57#define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
  58
  59/* Module parameters */
  60#define TX_TIMEO        5000
  61static int watchdog = TX_TIMEO;
  62module_param(watchdog, int, 0644);
  63MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
  64
  65static int debug = -1;
  66module_param(debug, int, 0644);
  67MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  68
  69static int phyaddr = -1;
  70module_param(phyaddr, int, 0444);
  71MODULE_PARM_DESC(phyaddr, "Physical device address");
  72
  73#define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
  74#define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
  75
  76static int flow_ctrl = FLOW_OFF;
  77module_param(flow_ctrl, int, 0644);
  78MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
  79
  80static int pause = PAUSE_TIME;
  81module_param(pause, int, 0644);
  82MODULE_PARM_DESC(pause, "Flow Control Pause Time");
  83
  84#define TC_DEFAULT 64
  85static int tc = TC_DEFAULT;
  86module_param(tc, int, 0644);
  87MODULE_PARM_DESC(tc, "DMA threshold control value");
  88
  89#define DEFAULT_BUFSIZE 1536
  90static int buf_sz = DEFAULT_BUFSIZE;
  91module_param(buf_sz, int, 0644);
  92MODULE_PARM_DESC(buf_sz, "DMA buffer size");
  93
  94#define STMMAC_RX_COPYBREAK     256
  95
  96static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
  97                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
  98                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
  99
 100#define STMMAC_DEFAULT_LPI_TIMER        1000
 101static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 102module_param(eee_timer, int, 0644);
 103MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
 104#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
 105
 106/* By default the driver will use the ring mode to manage tx and rx descriptors,
 107 * but allow the user to force the use of the chain mode instead of the ring.
 108 */
 109static unsigned int chain_mode;
 110module_param(chain_mode, int, 0444);
 111MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
 112
 113static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 114
 115#ifdef CONFIG_DEBUG_FS
 116static int stmmac_init_fs(struct net_device *dev);
 117static void stmmac_exit_fs(struct net_device *dev);
 118#endif
 119
 120#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
 121
 122/**
 123 * stmmac_verify_args - verify the driver parameters.
 124 * Description: it checks the driver parameters and sets a default in case of
 125 * errors.
 126 */
 127static void stmmac_verify_args(void)
 128{
 129        if (unlikely(watchdog < 0))
 130                watchdog = TX_TIMEO;
 131        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
 132                buf_sz = DEFAULT_BUFSIZE;
 133        if (unlikely(flow_ctrl > 1))
 134                flow_ctrl = FLOW_AUTO;
 135        else if (likely(flow_ctrl < 0))
 136                flow_ctrl = FLOW_OFF;
 137        if (unlikely((pause < 0) || (pause > 0xffff)))
 138                pause = PAUSE_TIME;
 139        if (eee_timer < 0)
 140                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 141}
 142
 143/**
 144 * stmmac_disable_all_queues - Disable all queues
 145 * @priv: driver private structure
 146 */
 147static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 148{
 149        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
 150        u32 queue;
 151
 152        for (queue = 0; queue < rx_queues_cnt; queue++) {
 153                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 154
 155                napi_disable(&rx_q->napi);
 156        }
 157}
 158
 159/**
 160 * stmmac_enable_all_queues - Enable all queues
 161 * @priv: driver private structure
 162 */
 163static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 164{
 165        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
 166        u32 queue;
 167
 168        for (queue = 0; queue < rx_queues_cnt; queue++) {
 169                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 170
 171                napi_enable(&rx_q->napi);
 172        }
 173}
 174
 175/**
 176 * stmmac_stop_all_queues - Stop all queues
 177 * @priv: driver private structure
 178 */
 179static void stmmac_stop_all_queues(struct stmmac_priv *priv)
 180{
 181        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
 182        u32 queue;
 183
 184        for (queue = 0; queue < tx_queues_cnt; queue++)
 185                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
 186}
 187
 188/**
 189 * stmmac_start_all_queues - Start all queues
 190 * @priv: driver private structure
 191 */
 192static void stmmac_start_all_queues(struct stmmac_priv *priv)
 193{
 194        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
 195        u32 queue;
 196
 197        for (queue = 0; queue < tx_queues_cnt; queue++)
 198                netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
 199}
 200
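    /* The STMMAC_SERVICE_SCHED bit below acts as a single-shot flag: the
     * service work is queued at most once until the bit is cleared again,
     * and never while STMMAC_DOWN is set (i.e. while the interface is being
     * taken down).
     */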
 201static void stmmac_service_event_schedule(struct stmmac_priv *priv)
 202{
 203        if (!test_bit(STMMAC_DOWN, &priv->state) &&
 204            !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
 205                queue_work(priv->wq, &priv->service_task);
 206}
 207
 208static void stmmac_global_err(struct stmmac_priv *priv)
 209{
 210        netif_carrier_off(priv->dev);
 211        set_bit(STMMAC_RESET_REQUESTED, &priv->state);
 212        stmmac_service_event_schedule(priv);
 213}
 214
 215/**
 216 * stmmac_clk_csr_set - dynamically set the MDC clock
 217 * @priv: driver private structure
 218 * Description: this is to dynamically set the MDC clock according to the csr
 219 * clock input.
 220 * Note:
 221 *      If a specific clk_csr value is passed from the platform
 222 *      this means that the CSR Clock Range selection cannot be
 223 *      changed at run-time and it is fixed (as reported in the driver
 224 *      documentation). Otherwise the driver will try to set the MDC
 225 *      clock dynamically according to the actual clock input.
 226 */
 227static void stmmac_clk_csr_set(struct stmmac_priv *priv)
 228{
 229        u32 clk_rate;
 230
 231        clk_rate = clk_get_rate(priv->plat->stmmac_clk);
 232
 233        /* Platform provided default clk_csr would be assumed valid
 234         * for all other cases except for the below mentioned ones.
 235         * For values higher than the IEEE 802.3 specified frequency
 236         * we cannot estimate the proper divider because the frequency
 237         * of clk_csr_i is not known. So we do not change the default
 238         * divider.
 239         */
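            /* Each range below selects a fixed MDC divider in hardware (for
             * the 150-250 MHz range this is roughly a divide-by-102), keeping
             * MDC within the ~2.5 MHz allowed by IEEE 802.3. For example, a
             * 200 MHz csr clock lands in the STMMAC_CSR_150_250M bucket.
             */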
 240        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
 241                if (clk_rate < CSR_F_35M)
 242                        priv->clk_csr = STMMAC_CSR_20_35M;
 243                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
 244                        priv->clk_csr = STMMAC_CSR_35_60M;
 245                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
 246                        priv->clk_csr = STMMAC_CSR_60_100M;
 247                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
 248                        priv->clk_csr = STMMAC_CSR_100_150M;
 249                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
 250                        priv->clk_csr = STMMAC_CSR_150_250M;
 251                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
 252                        priv->clk_csr = STMMAC_CSR_250_300M;
 253        }
 254
 255        if (priv->plat->has_sun8i) {
 256                if (clk_rate > 160000000)
 257                        priv->clk_csr = 0x03;
 258                else if (clk_rate > 80000000)
 259                        priv->clk_csr = 0x02;
 260                else if (clk_rate > 40000000)
 261                        priv->clk_csr = 0x01;
 262                else
 263                        priv->clk_csr = 0;
 264        }
 265}
 266
 267static void print_pkt(unsigned char *buf, int len)
 268{
 269        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
 270        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
 271}
 272
 273static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
 274{
 275        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 276        u32 avail;
 277
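            /* Standard ring-buffer accounting: one slot is intentionally kept
             * unused so that cur_tx == dirty_tx can only mean "empty", hence
             * the "- 1" below. An empty ring therefore reports
             * DMA_TX_SIZE - 1 free descriptors.
             */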
 278        if (tx_q->dirty_tx > tx_q->cur_tx)
 279                avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
 280        else
 281                avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
 282
 283        return avail;
 284}
 285
 286/**
 287 * stmmac_rx_dirty - Get RX queue dirty
 288 * @priv: driver private structure
 289 * @queue: RX queue index
 290 */
 291static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
 292{
 293        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 294        u32 dirty;
 295
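            /* Number of RX descriptors consumed but not yet refilled, with
             * the usual wrap-around handling.
             */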
 296        if (rx_q->dirty_rx <= rx_q->cur_rx)
 297                dirty = rx_q->cur_rx - rx_q->dirty_rx;
 298        else
 299                dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
 300
 301        return dirty;
 302}
 303
 304/**
 305 * stmmac_hw_fix_mac_speed - callback for speed selection
 306 * @priv: driver private structure
 307 * Description: on some platforms (e.g. ST), some HW system configuration
 308 * registers have to be set according to the link speed negotiated.
 309 */
 310static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
 311{
 312        struct net_device *ndev = priv->dev;
 313        struct phy_device *phydev = ndev->phydev;
 314
 315        if (likely(priv->plat->fix_mac_speed))
 316                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
 317}
 318
 319/**
 320 * stmmac_enable_eee_mode - check and enter in LPI mode
 321 * @priv: driver private structure
 322 * Description: this function is to verify and enter the LPI mode in case of
 323 * EEE.
 324 */
 325static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
 326{
 327        u32 tx_cnt = priv->plat->tx_queues_to_use;
 328        u32 queue;
 329
 330        /* check if all TX queues have the work finished */
 331        for (queue = 0; queue < tx_cnt; queue++) {
 332                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 333
 334                if (tx_q->dirty_tx != tx_q->cur_tx)
 335                        return; /* still unfinished work */
 336        }
 337
 338        /* Check and enter in LPI mode */
 339        if (!priv->tx_path_in_lpi_mode)
 340                stmmac_set_eee_mode(priv, priv->hw,
 341                                priv->plat->en_tx_lpi_clockgating);
 342}
 343
 344/**
 345 * stmmac_disable_eee_mode - disable and exit from LPI mode
 346 * @priv: driver private structure
 347 * Description: this function is to exit and disable EEE in case the
 348 * LPI state is true. This is called by the xmit.
 349 */
 350void stmmac_disable_eee_mode(struct stmmac_priv *priv)
 351{
 352        stmmac_reset_eee_mode(priv, priv->hw);
 353        del_timer_sync(&priv->eee_ctrl_timer);
 354        priv->tx_path_in_lpi_mode = false;
 355}
 356
 357/**
 358 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 359 * @t: timer_list struct containing private info
 360 * Description:
 361 *  if there is no data transfer and if we are not in LPI state,
 362 *  then MAC Transmitter can be moved to LPI state.
 363 */
 364static void stmmac_eee_ctrl_timer(struct timer_list *t)
 365{
 366        struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
 367
 368        stmmac_enable_eee_mode(priv);
 369        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 370}
 371
 372/**
 373 * stmmac_eee_init - init EEE
 374 * @priv: driver private structure
 375 * Description:
 376 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 377 *  can also manage EEE, this function enables the LPI state and starts the
 378 *  related timer.
 379 */
 380bool stmmac_eee_init(struct stmmac_priv *priv)
 381{
 382        struct net_device *ndev = priv->dev;
 383        int interface = priv->plat->interface;
 384        bool ret = false;
 385
 386        if ((interface != PHY_INTERFACE_MODE_MII) &&
 387            (interface != PHY_INTERFACE_MODE_GMII) &&
 388            !phy_interface_mode_is_rgmii(interface))
 389                goto out;
 390
 391        /* Using PCS we cannot deal with the phy registers at this stage
 392         * so we do not support extra features like EEE.
 393         */
 394        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
 395            (priv->hw->pcs == STMMAC_PCS_TBI) ||
 396            (priv->hw->pcs == STMMAC_PCS_RTBI))
 397                goto out;
 398
 399        /* MAC core supports the EEE feature. */
 400        if (priv->dma_cap.eee) {
 401                int tx_lpi_timer = priv->tx_lpi_timer;
 402
 403                /* Check if the PHY supports EEE */
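                    /* phy_init_eee() is expected to return 0 only when EEE is
                     * advertised by both this PHY and the link partner (the
                     * second argument requests that the PHY may stop its clock
                     * in LPI). A non-zero return means EEE cannot be used, so
                     * any previously armed timer is torn down below.
                     */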
 404                if (phy_init_eee(ndev->phydev, 1)) {
 405                        /* To manage at run-time if the EEE cannot be supported
 406                         * anymore (for example because the lp caps have been
 407                         * changed).
 408                         * In that case the driver disables its own timers.
 409                         */
 410                        mutex_lock(&priv->lock);
 411                        if (priv->eee_active) {
 412                                netdev_dbg(priv->dev, "disable EEE\n");
 413                                del_timer_sync(&priv->eee_ctrl_timer);
 414                                stmmac_set_eee_timer(priv, priv->hw, 0,
 415                                                tx_lpi_timer);
 416                        }
 417                        priv->eee_active = 0;
 418                        mutex_unlock(&priv->lock);
 419                        goto out;
 420                }
 421                /* Activate the EEE and start timers */
 422                mutex_lock(&priv->lock);
 423                if (!priv->eee_active) {
 424                        priv->eee_active = 1;
 425                        timer_setup(&priv->eee_ctrl_timer,
 426                                    stmmac_eee_ctrl_timer, 0);
 427                        mod_timer(&priv->eee_ctrl_timer,
 428                                  STMMAC_LPI_T(eee_timer));
 429
 430                        stmmac_set_eee_timer(priv, priv->hw,
 431                                        STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
 432                }
 433                /* Set HW EEE according to the speed */
 434                stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
 435
 436                ret = true;
 437                mutex_unlock(&priv->lock);
 438
 439                netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
 440        }
 441out:
 442        return ret;
 443}
 444
 445/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 446 * @priv: driver private structure
 447 * @p : descriptor pointer
 448 * @skb : the socket buffer
 449 * Description :
 450 * This function will read the timestamp from the descriptor & pass it to the
 451 * stack, and also performs some sanity checks.
 452 */
 453static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 454                                   struct dma_desc *p, struct sk_buff *skb)
 455{
 456        struct skb_shared_hwtstamps shhwtstamp;
 457        u64 ns;
 458
 459        if (!priv->hwts_tx_en)
 460                return;
 461
 462        /* exit if skb doesn't support hw tstamp */
 463        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
 464                return;
 465
 466        /* check tx tstamp status */
 467        if (stmmac_get_tx_timestamp_status(priv, p)) {
 468                /* get the valid tstamp */
 469                stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
 470
 471                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 472                shhwtstamp.hwtstamp = ns_to_ktime(ns);
 473
 474                netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
 475                /* pass tstamp to stack */
 476                skb_tstamp_tx(skb, &shhwtstamp);
 477        }
 478
 479        return;
 480}
 481
 482/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 483 * @priv: driver private structure
 484 * @p : descriptor pointer
 485 * @np : next descriptor pointer
 486 * @skb : the socket buffer
 487 * Description :
 488 * This function will read received packet's timestamp from the descriptor
 489 * and pass it to the stack. It also performs some sanity checks.
 490 */
 491static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
 492                                   struct dma_desc *np, struct sk_buff *skb)
 493{
 494        struct skb_shared_hwtstamps *shhwtstamp = NULL;
 495        struct dma_desc *desc = p;
 496        u64 ns;
 497
 498        if (!priv->hwts_rx_en)
 499                return;
 500        /* For GMAC4, the valid timestamp is from CTX next desc. */
 501        if (priv->plat->has_gmac4)
 502                desc = np;
 503
 504        /* Check if timestamp is available */
 505        if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
 506                stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
 507                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
 508                shhwtstamp = skb_hwtstamps(skb);
 509                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 510                shhwtstamp->hwtstamp = ns_to_ktime(ns);
 511        } else  {
 512                netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
 513        }
 514}
 515
 516/**
 517 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 518 *  @dev: device pointer.
 519 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 520 *  a proprietary structure used to pass information to the driver.
 521 *  Description:
 522 *  This function configures the MAC to enable/disable both outgoing(TX)
 523 *  and incoming(RX) packets time stamping based on user input.
 524 *  Return Value:
 525 *  0 on success and an appropriate -ve integer on failure.
 526 */
 527static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 528{
 529        struct stmmac_priv *priv = netdev_priv(dev);
 530        struct hwtstamp_config config;
 531        struct timespec64 now;
 532        u64 temp = 0;
 533        u32 ptp_v2 = 0;
 534        u32 tstamp_all = 0;
 535        u32 ptp_over_ipv4_udp = 0;
 536        u32 ptp_over_ipv6_udp = 0;
 537        u32 ptp_over_ethernet = 0;
 538        u32 snap_type_sel = 0;
 539        u32 ts_master_en = 0;
 540        u32 ts_event_en = 0;
 541        u32 value = 0;
 542        u32 sec_inc;
 543
 544        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
 545                netdev_alert(priv->dev, "No support for HW time stamping\n");
 546                priv->hwts_tx_en = 0;
 547                priv->hwts_rx_en = 0;
 548
 549                return -EOPNOTSUPP;
 550        }
 551
 552        if (copy_from_user(&config, ifr->ifr_data,
 553                           sizeof(struct hwtstamp_config)))
 554                return -EFAULT;
 555
 556        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
 557                   __func__, config.flags, config.tx_type, config.rx_filter);
 558
 559        /* reserved for future extensions */
 560        if (config.flags)
 561                return -EINVAL;
 562
 563        if (config.tx_type != HWTSTAMP_TX_OFF &&
 564            config.tx_type != HWTSTAMP_TX_ON)
 565                return -ERANGE;
 566
 567        if (priv->adv_ts) {
 568                switch (config.rx_filter) {
 569                case HWTSTAMP_FILTER_NONE:
 570                        /* time stamp no incoming packet at all */
 571                        config.rx_filter = HWTSTAMP_FILTER_NONE;
 572                        break;
 573
 574                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 575                        /* PTP v1, UDP, any kind of event packet */
 576                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 577                        /* take time stamp for all event messages */
 578                        if (priv->plat->has_gmac4)
 579                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
 580                        else
 581                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 582
 583                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 584                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 585                        break;
 586
 587                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 588                        /* PTP v1, UDP, Sync packet */
 589                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
 590                        /* take time stamp for SYNC messages only */
 591                        ts_event_en = PTP_TCR_TSEVNTENA;
 592
 593                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 594                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 595                        break;
 596
 597                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 598                        /* PTP v1, UDP, Delay_req packet */
 599                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
 600                        /* take time stamp for Delay_Req messages only */
 601                        ts_master_en = PTP_TCR_TSMSTRENA;
 602                        ts_event_en = PTP_TCR_TSEVNTENA;
 603
 604                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 605                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 606                        break;
 607
 608                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 609                        /* PTP v2, UDP, any kind of event packet */
 610                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 611                        ptp_v2 = PTP_TCR_TSVER2ENA;
 612                        /* take time stamp for all event messages */
 613                        if (priv->plat->has_gmac4)
 614                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
 615                        else
 616                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 617
 618                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 619                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 620                        break;
 621
 622                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 623                        /* PTP v2, UDP, Sync packet */
 624                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
 625                        ptp_v2 = PTP_TCR_TSVER2ENA;
 626                        /* take time stamp for SYNC messages only */
 627                        ts_event_en = PTP_TCR_TSEVNTENA;
 628
 629                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 630                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 631                        break;
 632
 633                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 634                        /* PTP v2, UDP, Delay_req packet */
 635                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
 636                        ptp_v2 = PTP_TCR_TSVER2ENA;
 637                        /* take time stamp for Delay_Req messages only */
 638                        ts_master_en = PTP_TCR_TSMSTRENA;
 639                        ts_event_en = PTP_TCR_TSEVNTENA;
 640
 641                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 642                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 643                        break;
 644
 645                case HWTSTAMP_FILTER_PTP_V2_EVENT:
 646                        /* PTP v2/802.1AS, any layer, any kind of event packet */
 647                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 648                        ptp_v2 = PTP_TCR_TSVER2ENA;
 649                        /* take time stamp for all event messages */
 650                        if (priv->plat->has_gmac4)
 651                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
 652                        else
 653                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 654
 655                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 656                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 657                        ptp_over_ethernet = PTP_TCR_TSIPENA;
 658                        break;
 659
 660                case HWTSTAMP_FILTER_PTP_V2_SYNC:
 661                        /* PTP v2/802.1AS, any layer, Sync packet */
 662                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
 663                        ptp_v2 = PTP_TCR_TSVER2ENA;
 664                        /* take time stamp for SYNC messages only */
 665                        ts_event_en = PTP_TCR_TSEVNTENA;
 666
 667                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 668                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 669                        ptp_over_ethernet = PTP_TCR_TSIPENA;
 670                        break;
 671
 672                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 673                        /* PTP v2/802.1AS, any layer, Delay_req packet */
 674                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
 675                        ptp_v2 = PTP_TCR_TSVER2ENA;
 676                        /* take time stamp for Delay_Req messages only */
 677                        ts_master_en = PTP_TCR_TSMSTRENA;
 678                        ts_event_en = PTP_TCR_TSEVNTENA;
 679
 680                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 681                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 682                        ptp_over_ethernet = PTP_TCR_TSIPENA;
 683                        break;
 684
 685                case HWTSTAMP_FILTER_NTP_ALL:
 686                case HWTSTAMP_FILTER_ALL:
 687                        /* time stamp any incoming packet */
 688                        config.rx_filter = HWTSTAMP_FILTER_ALL;
 689                        tstamp_all = PTP_TCR_TSENALL;
 690                        break;
 691
 692                default:
 693                        return -ERANGE;
 694                }
 695        } else {
 696                switch (config.rx_filter) {
 697                case HWTSTAMP_FILTER_NONE:
 698                        config.rx_filter = HWTSTAMP_FILTER_NONE;
 699                        break;
 700                default:
 701                        /* PTP v1, UDP, any kind of event packet */
 702                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 703                        break;
 704                }
 705        }
 706        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
 707        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 708
 709        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
 710                stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
 711        else {
 712                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
 713                         tstamp_all | ptp_v2 | ptp_over_ethernet |
 714                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
 715                         ts_master_en | snap_type_sel);
 716                stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
 717
 718                /* program Sub Second Increment reg */
 719                stmmac_config_sub_second_increment(priv,
 720                                priv->ptpaddr, priv->plat->clk_ptp_rate,
 721                                priv->plat->has_gmac4, &sec_inc);
 722                temp = div_u64(1000000000ULL, sec_inc);
 723
 724                /* Store sub second increment and flags for later use */
 725                priv->sub_second_inc = sec_inc;
 726                priv->systime_flags = value;
 727
 728                /* calculate default added value:
 729                 * formula is :
 730                 * addend = (2^32)/freq_div_ratio;
 731                 * where, freq_div_ratio = clk_ptp_rate / (1e9ns / sec_inc)
 732                 */
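                    /* Illustrative example (numbers are not taken from this
                     * code): with sec_inc = 20 ns (a 50 MHz accumulator) and
                     * clk_ptp_rate = 62.5 MHz, temp = 1e9 / 20 = 50e6 and
                     * addend = (50e6 << 32) / 62.5e6 ~= 0.8 * 2^32 = 0xCCCCCCCC.
                     */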
 733                temp = (u64)(temp << 32);
 734                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
 735                stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
 736
 737                /* initialize system time */
 738                ktime_get_real_ts64(&now);
 739
 740                /* lower 32 bits of tv_sec are safe until y2106 */
 741                stmmac_init_systime(priv, priv->ptpaddr,
 742                                (u32)now.tv_sec, now.tv_nsec);
 743        }
 744
 745        return copy_to_user(ifr->ifr_data, &config,
 746                            sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
 747}
 748
 749/**
 750 * stmmac_init_ptp - init PTP
 751 * @priv: driver private structure
 752 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 753 * This is done by looking at the HW cap. register.
 754 * This function also registers the ptp driver.
 755 */
 756static int stmmac_init_ptp(struct stmmac_priv *priv)
 757{
 758        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
 759                return -EOPNOTSUPP;
 760
 761        priv->adv_ts = 0;
 762        /* Check if adv_ts can be enabled for dwmac 4.x core */
 763        if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
 764                priv->adv_ts = 1;
 765        /* Dwmac 3.x core with extend_desc can support adv_ts */
 766        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
 767                priv->adv_ts = 1;
 768
 769        if (priv->dma_cap.time_stamp)
 770                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
 771
 772        if (priv->adv_ts)
 773                netdev_info(priv->dev,
 774                            "IEEE 1588-2008 Advanced Timestamp supported\n");
 775
 776        priv->hwts_tx_en = 0;
 777        priv->hwts_rx_en = 0;
 778
 779        stmmac_ptp_register(priv);
 780
 781        return 0;
 782}
 783
 784static void stmmac_release_ptp(struct stmmac_priv *priv)
 785{
 786        if (priv->plat->clk_ptp_ref)
 787                clk_disable_unprepare(priv->plat->clk_ptp_ref);
 788        stmmac_ptp_unregister(priv);
 789}
 790
 791/**
 792 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 793 *  @priv: driver private structure
 794 *  Description: It is used for configuring the flow control in all queues
 795 */
 796static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
 797{
 798        u32 tx_cnt = priv->plat->tx_queues_to_use;
 799
 800        stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
 801                        priv->pause, tx_cnt);
 802}
 803
 804/**
 805 * stmmac_adjust_link - adjusts the link parameters
 806 * @dev: net device structure
 807 * Description: this is the helper called by the physical abstraction layer
 808 * drivers to communicate the phy link status. According to the speed and duplex
 809 * this driver can invoke registered glue-logic as well.
 810 * It also invokes the eee initialization because it could happen when switching
 811 * between different networks (that are eee capable).
 812 */
 813static void stmmac_adjust_link(struct net_device *dev)
 814{
 815        struct stmmac_priv *priv = netdev_priv(dev);
 816        struct phy_device *phydev = dev->phydev;
 817        bool new_state = false;
 818
 819        if (!phydev)
 820                return;
 821
 822        mutex_lock(&priv->lock);
 823
 824        if (phydev->link) {
 825                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
 826
 827                /* Now we make sure that we can be in full duplex mode.
 828                 * If not, we operate in half-duplex mode. */
 829                if (phydev->duplex != priv->oldduplex) {
 830                        new_state = true;
 831                        if (!phydev->duplex)
 832                                ctrl &= ~priv->hw->link.duplex;
 833                        else
 834                                ctrl |= priv->hw->link.duplex;
 835                        priv->oldduplex = phydev->duplex;
 836                }
 837                /* Flow Control operation */
 838                if (phydev->pause)
 839                        stmmac_mac_flow_ctrl(priv, phydev->duplex);
 840
 841                if (phydev->speed != priv->speed) {
 842                        new_state = true;
 843                        ctrl &= ~priv->hw->link.speed_mask;
 844                        switch (phydev->speed) {
 845                        case SPEED_1000:
 846                                ctrl |= priv->hw->link.speed1000;
 847                                break;
 848                        case SPEED_100:
 849                                ctrl |= priv->hw->link.speed100;
 850                                break;
 851                        case SPEED_10:
 852                                ctrl |= priv->hw->link.speed10;
 853                                break;
 854                        default:
 855                                netif_warn(priv, link, priv->dev,
 856                                           "broken speed: %d\n", phydev->speed);
 857                                phydev->speed = SPEED_UNKNOWN;
 858                                break;
 859                        }
 860                        if (phydev->speed != SPEED_UNKNOWN)
 861                                stmmac_hw_fix_mac_speed(priv);
 862                        priv->speed = phydev->speed;
 863                }
 864
 865                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
 866
 867                if (!priv->oldlink) {
 868                        new_state = true;
 869                        priv->oldlink = true;
 870                }
 871        } else if (priv->oldlink) {
 872                new_state = true;
 873                priv->oldlink = false;
 874                priv->speed = SPEED_UNKNOWN;
 875                priv->oldduplex = DUPLEX_UNKNOWN;
 876        }
 877
 878        if (new_state && netif_msg_link(priv))
 879                phy_print_status(phydev);
 880
 881        mutex_unlock(&priv->lock);
 882
 883        if (phydev->is_pseudo_fixed_link)
 884                /* Stop the PHY layer from calling the hook to adjust the link
 885                 * in case a switch is attached to the stmmac driver.
 886                 */
 887                phydev->irq = PHY_IGNORE_INTERRUPT;
 888        else
 889                /* At this stage, init the EEE if supported.
 890                 * Never called in case of fixed_link.
 891                 */
 892                priv->eee_enabled = stmmac_eee_init(priv);
 893}
 894
 895/**
 896 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 897 * @priv: driver private structure
 898 * Description: this is to verify if the HW supports the PCS.
 899 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
 900 * configured for the TBI, RTBI, or SGMII PHY interface.
 901 */
 902static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 903{
 904        int interface = priv->plat->interface;
 905
 906        if (priv->dma_cap.pcs) {
 907                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
 908                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
 909                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
 910                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
 911                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
 912                        priv->hw->pcs = STMMAC_PCS_RGMII;
 913                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
 914                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
 915                        priv->hw->pcs = STMMAC_PCS_SGMII;
 916                }
 917        }
 918}
 919
 920/**
 921 * stmmac_init_phy - PHY initialization
 922 * @dev: net device structure
 923 * Description: it initializes the driver's PHY state, and attaches the PHY
 924 * to the mac driver.
 925 *  Return value:
 926 *  0 on success
 927 */
 928static int stmmac_init_phy(struct net_device *dev)
 929{
 930        struct stmmac_priv *priv = netdev_priv(dev);
 931        u32 tx_cnt = priv->plat->tx_queues_to_use;
 932        struct phy_device *phydev;
 933        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
 934        char bus_id[MII_BUS_ID_SIZE];
 935        int interface = priv->plat->interface;
 936        int max_speed = priv->plat->max_speed;
 937        priv->oldlink = false;
 938        priv->speed = SPEED_UNKNOWN;
 939        priv->oldduplex = DUPLEX_UNKNOWN;
 940
 941        if (priv->plat->phy_node) {
 942                phydev = of_phy_connect(dev, priv->plat->phy_node,
 943                                        &stmmac_adjust_link, 0, interface);
 944        } else {
 945                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
 946                         priv->plat->bus_id);
 947
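                    /* PHY_ID_FMT is "%s:%02x", so the resulting id should look
                     * like "stmmac-0:01" for bus 0 and PHY address 1.
                     */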
 948                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 949                         priv->plat->phy_addr);
 950                netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
 951                           phy_id_fmt);
 952
 953                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
 954                                     interface);
 955        }
 956
 957        if (IS_ERR_OR_NULL(phydev)) {
 958                netdev_err(priv->dev, "Could not attach to PHY\n");
 959                if (!phydev)
 960                        return -ENODEV;
 961
 962                return PTR_ERR(phydev);
 963        }
 964
 965        /* Stop Advertising 1000BASE Capability if interface is not GMII */
 966        if ((interface == PHY_INTERFACE_MODE_MII) ||
 967            (interface == PHY_INTERFACE_MODE_RMII) ||
 968                (max_speed < 1000 && max_speed > 0))
 969                phy_set_max_speed(phydev, SPEED_100);
 970
 971        /*
 972         * Half-duplex mode is not supported with multiqueue:
 973         * half-duplex can only work with a single queue.
 974         */
 975        if (tx_cnt > 1) {
 976                phy_remove_link_mode(phydev,
 977                                     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
 978                phy_remove_link_mode(phydev,
 979                                     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
 980                phy_remove_link_mode(phydev,
 981                                     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
 982        }
 983
 984        /*
 985         * Broken HW is sometimes missing the pull-up resistor on the
 986         * MDIO line, which results in reads to non-existent devices returning
 987         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
 988         * device as well.
 989         * Note: phydev->phy_id is the result of reading the UID PHY registers.
 990         */
 991        if (!priv->plat->phy_node && phydev->phy_id == 0) {
 992                phy_disconnect(phydev);
 993                return -ENODEV;
 994        }
 995
 996        /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
 997         * subsequent PHY polling, make sure we force a link transition if
 998         * we have a UP/DOWN/UP transition
 999         */
1000        if (phydev->is_pseudo_fixed_link)
1001                phydev->irq = PHY_POLL;
1002
1003        phy_attached_info(phydev);
1004        return 0;
1005}
1006
1007static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1008{
1009        u32 rx_cnt = priv->plat->rx_queues_to_use;
1010        void *head_rx;
1011        u32 queue;
1012
1013        /* Display RX rings */
1014        for (queue = 0; queue < rx_cnt; queue++) {
1015                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1016
1017                pr_info("\tRX Queue %u rings\n", queue);
1018
1019                if (priv->extend_desc)
1020                        head_rx = (void *)rx_q->dma_erx;
1021                else
1022                        head_rx = (void *)rx_q->dma_rx;
1023
1024                /* Display RX ring */
1025                stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1026        }
1027}
1028
1029static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1030{
1031        u32 tx_cnt = priv->plat->tx_queues_to_use;
1032        void *head_tx;
1033        u32 queue;
1034
1035        /* Display TX rings */
1036        for (queue = 0; queue < tx_cnt; queue++) {
1037                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1038
1039                pr_info("\tTX Queue %d rings\n", queue);
1040
1041                if (priv->extend_desc)
1042                        head_tx = (void *)tx_q->dma_etx;
1043                else
1044                        head_tx = (void *)tx_q->dma_tx;
1045
1046                stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1047        }
1048}
1049
1050static void stmmac_display_rings(struct stmmac_priv *priv)
1051{
1052        /* Display RX ring */
1053        stmmac_display_rx_rings(priv);
1054
1055        /* Display TX ring */
1056        stmmac_display_tx_rings(priv);
1057}
1058
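    /* Pick a DMA buffer size bucket from the MTU. For example, the default
     * 1500-byte MTU keeps DEFAULT_BUFSIZE (1536 bytes), while an MTU of 3000
     * falls into the BUF_SIZE_4KiB case.
     */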
1059static int stmmac_set_bfsize(int mtu, int bufsize)
1060{
1061        int ret = bufsize;
1062
1063        if (mtu >= BUF_SIZE_4KiB)
1064                ret = BUF_SIZE_8KiB;
1065        else if (mtu >= BUF_SIZE_2KiB)
1066                ret = BUF_SIZE_4KiB;
1067        else if (mtu > DEFAULT_BUFSIZE)
1068                ret = BUF_SIZE_2KiB;
1069        else
1070                ret = DEFAULT_BUFSIZE;
1071
1072        return ret;
1073}
1074
1075/**
1076 * stmmac_clear_rx_descriptors - clear RX descriptors
1077 * @priv: driver private structure
1078 * @queue: RX queue index
1079 * Description: this function is called to clear the RX descriptors
1080 * whether basic or extended descriptors are used.
1081 */
1082static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1083{
1084        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1085        int i;
1086
1087        /* Clear the RX descriptors */
1088        for (i = 0; i < DMA_RX_SIZE; i++)
1089                if (priv->extend_desc)
1090                        stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1091                                        priv->use_riwt, priv->mode,
1092                                        (i == DMA_RX_SIZE - 1));
1093                else
1094                        stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1095                                        priv->use_riwt, priv->mode,
1096                                        (i == DMA_RX_SIZE - 1));
1097}
1098
1099/**
1100 * stmmac_clear_tx_descriptors - clear tx descriptors
1101 * @priv: driver private structure
1102 * @queue: TX queue index.
1103 * Description: this function is called to clear the TX descriptors
1104 * whether basic or extended descriptors are used.
1105 */
1106static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1107{
1108        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1109        int i;
1110
1111        /* Clear the TX descriptors */
1112        for (i = 0; i < DMA_TX_SIZE; i++)
1113                if (priv->extend_desc)
1114                        stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1115                                        priv->mode, (i == DMA_TX_SIZE - 1));
1116                else
1117                        stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1118                                        priv->mode, (i == DMA_TX_SIZE - 1));
1119}
1120
1121/**
1122 * stmmac_clear_descriptors - clear descriptors
1123 * @priv: driver private structure
1124 * Description: this function is called to clear the TX and RX descriptors
1125 * whether basic or extended descriptors are used.
1126 */
1127static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1128{
1129        u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1130        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1131        u32 queue;
1132
1133        /* Clear the RX descriptors */
1134        for (queue = 0; queue < rx_queue_cnt; queue++)
1135                stmmac_clear_rx_descriptors(priv, queue);
1136
1137        /* Clear the TX descriptors */
1138        for (queue = 0; queue < tx_queue_cnt; queue++)
1139                stmmac_clear_tx_descriptors(priv, queue);
1140}
1141
1142/**
1143 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1144 * @priv: driver private structure
1145 * @p: descriptor pointer
1146 * @i: descriptor index
1147 * @flags: gfp flag
1148 * @queue: RX queue index
1149 * Description: this function is called to allocate a receive buffer, perform
1150 * the DMA mapping and init the descriptor.
1151 */
1152static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1153                                  int i, gfp_t flags, u32 queue)
1154{
1155        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1156        struct sk_buff *skb;
1157
1158        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1159        if (!skb) {
1160                netdev_err(priv->dev,
1161                           "%s: Rx init fails; skb is NULL\n", __func__);
1162                return -ENOMEM;
1163        }
1164        rx_q->rx_skbuff[i] = skb;
1165        rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1166                                                priv->dma_buf_sz,
1167                                                DMA_FROM_DEVICE);
1168        if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1169                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1170                dev_kfree_skb_any(skb);
1171                return -EINVAL;
1172        }
1173
1174        stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1175
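            /* With 16 KiB buffers in ring mode, init_desc3 also programs the
             * descriptor's second buffer pointer so one descriptor can cover
             * a jumbo frame as two 8 KiB halves of the same allocation.
             */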
1176        if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1177                stmmac_init_desc3(priv, p);
1178
1179        return 0;
1180}
1181
1182/**
1183 * stmmac_free_rx_buffer - free RX dma buffers
1184 * @priv: private structure
1185 * @queue: RX queue index
1186 * @i: buffer index.
1187 */
1188static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1189{
1190        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1191
1192        if (rx_q->rx_skbuff[i]) {
1193                dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1194                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
1195                dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1196        }
1197        rx_q->rx_skbuff[i] = NULL;
1198}
1199
1200/**
1201 * stmmac_free_tx_buffer - free TX dma buffers
1202 * @priv: private structure
1203 * @queue: TX queue index
1204 * @i: buffer index.
1205 */
1206static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1207{
1208        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1209
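            /* Undo whichever mapping was set up at xmit time: buffers flagged
             * map_as_page (skb fragments) are released with dma_unmap_page(),
             * linearly mapped data with dma_unmap_single().
             */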
1210        if (tx_q->tx_skbuff_dma[i].buf) {
1211                if (tx_q->tx_skbuff_dma[i].map_as_page)
1212                        dma_unmap_page(priv->device,
1213                                       tx_q->tx_skbuff_dma[i].buf,
1214                                       tx_q->tx_skbuff_dma[i].len,
1215                                       DMA_TO_DEVICE);
1216                else
1217                        dma_unmap_single(priv->device,
1218                                         tx_q->tx_skbuff_dma[i].buf,
1219                                         tx_q->tx_skbuff_dma[i].len,
1220                                         DMA_TO_DEVICE);
1221        }
1222
1223        if (tx_q->tx_skbuff[i]) {
1224                dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1225                tx_q->tx_skbuff[i] = NULL;
1226                tx_q->tx_skbuff_dma[i].buf = 0;
1227                tx_q->tx_skbuff_dma[i].map_as_page = false;
1228        }
1229}
1230
1231/**
1232 * init_dma_rx_desc_rings - init the RX descriptor rings
1233 * @dev: net device structure
1234 * @flags: gfp flag.
1235 * Description: this function initializes the DMA RX descriptors
1236 * and allocates the socket buffers. It supports the chained and ring
1237 * modes.
1238 */
1239static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1240{
1241        struct stmmac_priv *priv = netdev_priv(dev);
1242        u32 rx_count = priv->plat->rx_queues_to_use;
1243        int ret = -ENOMEM;
1244        int bfsize = 0;
1245        int queue;
1246        int i;
1247
1248        bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1249        if (bfsize < 0)
1250                bfsize = 0;
1251
1252        if (bfsize < BUF_SIZE_16KiB)
1253                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1254
1255        priv->dma_buf_sz = bfsize;
1256
1257        /* RX INITIALIZATION */
1258        netif_dbg(priv, probe, priv->dev,
1259                  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1260
1261        for (queue = 0; queue < rx_count; queue++) {
1262                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1263
1264                netif_dbg(priv, probe, priv->dev,
1265                          "(%s) dma_rx_phy=0x%08x\n", __func__,
1266                          (u32)rx_q->dma_rx_phy);
1267
1268                for (i = 0; i < DMA_RX_SIZE; i++) {
1269                        struct dma_desc *p;
1270
1271                        if (priv->extend_desc)
1272                                p = &((rx_q->dma_erx + i)->basic);
1273                        else
1274                                p = rx_q->dma_rx + i;
1275
1276                        ret = stmmac_init_rx_buffers(priv, p, i, flags,
1277                                                     queue);
1278                        if (ret)
1279                                goto err_init_rx_buffers;
1280
1281                        netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1282                                  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1283                                  (unsigned int)rx_q->rx_skbuff_dma[i]);
1284                }
1285
1286                rx_q->cur_rx = 0;
1287                rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1288
1289                stmmac_clear_rx_descriptors(priv, queue);
1290
1291                /* Setup the chained descriptor addresses */
1292                if (priv->mode == STMMAC_CHAIN_MODE) {
1293                        if (priv->extend_desc)
1294                                stmmac_mode_init(priv, rx_q->dma_erx,
1295                                                rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1296                        else
1297                                stmmac_mode_init(priv, rx_q->dma_rx,
1298                                                rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1299                }
1300        }
1301
1302        buf_sz = bfsize;
1303
1304        return 0;
1305
1306err_init_rx_buffers:
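            /* Unwind in reverse: free the buffers allocated so far in the
             * failing queue, then every buffer of the queues that were already
             * fully initialized.
             */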
1307        while (queue >= 0) {
1308                while (--i >= 0)
1309                        stmmac_free_rx_buffer(priv, queue, i);
1310
1311                if (queue == 0)
1312                        break;
1313
1314                i = DMA_RX_SIZE;
1315                queue--;
1316        }
1317
1318        return ret;
1319}
1320
1321/**
1322 * init_dma_tx_desc_rings - init the TX descriptor rings
1323 * @dev: net device structure.
1324 * Description: this function initializes the DMA TX descriptors
1325 * and allocates the socket buffers. It supports the chained and ring
1326 * modes.
1327 */
1328static int init_dma_tx_desc_rings(struct net_device *dev)
1329{
1330        struct stmmac_priv *priv = netdev_priv(dev);
1331        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1332        u32 queue;
1333        int i;
1334
1335        for (queue = 0; queue < tx_queue_cnt; queue++) {
1336                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1337
1338                netif_dbg(priv, probe, priv->dev,
1339                          "(%s) dma_tx_phy=0x%08x\n", __func__,
1340                         (u32)tx_q->dma_tx_phy);
1341
1342                /* Setup the chained descriptor addresses */
1343                if (priv->mode == STMMAC_CHAIN_MODE) {
1344                        if (priv->extend_desc)
1345                                stmmac_mode_init(priv, tx_q->dma_etx,
1346                                                tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1347                        else
1348                                stmmac_mode_init(priv, tx_q->dma_tx,
1349                                                tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1350                }
1351
1352                for (i = 0; i < DMA_TX_SIZE; i++) {
1353                        struct dma_desc *p;
1354                        if (priv->extend_desc)
1355                                p = &((tx_q->dma_etx + i)->basic);
1356                        else
1357                                p = tx_q->dma_tx + i;
1358
1359                        stmmac_clear_desc(priv, p);
1360
1361                        tx_q->tx_skbuff_dma[i].buf = 0;
1362                        tx_q->tx_skbuff_dma[i].map_as_page = false;
1363                        tx_q->tx_skbuff_dma[i].len = 0;
1364                        tx_q->tx_skbuff_dma[i].last_segment = false;
1365                        tx_q->tx_skbuff[i] = NULL;
1366                }
1367
1368                tx_q->dirty_tx = 0;
1369                tx_q->cur_tx = 0;
1370                tx_q->mss = 0;
1371
1372                netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1373        }
1374
1375        return 0;
1376}
1377
1378/**
1379 * init_dma_desc_rings - init the RX/TX descriptor rings
1380 * @dev: net device structure
1381 * @flags: gfp flag.
1382 * Description: this function initializes the DMA RX/TX descriptors
1383 * and allocates the socket buffers. It supports the chained and ring
1384 * modes.
1385 */
1386static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1387{
1388        struct stmmac_priv *priv = netdev_priv(dev);
1389        int ret;
1390
1391        ret = init_dma_rx_desc_rings(dev, flags);
1392        if (ret)
1393                return ret;
1394
1395        ret = init_dma_tx_desc_rings(dev);
1396
1397        stmmac_clear_descriptors(priv);
1398
1399        if (netif_msg_hw(priv))
1400                stmmac_display_rings(priv);
1401
1402        return ret;
1403}
1404
1405/**
1406 * dma_free_rx_skbufs - free RX dma buffers
1407 * @priv: private structure
1408 * @queue: RX queue index
1409 */
1410static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1411{
1412        int i;
1413
1414        for (i = 0; i < DMA_RX_SIZE; i++)
1415                stmmac_free_rx_buffer(priv, queue, i);
1416}
1417
1418/**
1419 * dma_free_tx_skbufs - free TX dma buffers
1420 * @priv: private structure
1421 * @queue: TX queue index
1422 */
1423static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1424{
1425        int i;
1426
1427        for (i = 0; i < DMA_TX_SIZE; i++)
1428                stmmac_free_tx_buffer(priv, queue, i);
1429}
1430
1431/**
1432 * free_dma_rx_desc_resources - free RX dma desc resources
1433 * @priv: private structure
1434 */
1435static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1436{
1437        u32 rx_count = priv->plat->rx_queues_to_use;
1438        u32 queue;
1439
1440        /* Free RX queue resources */
1441        for (queue = 0; queue < rx_count; queue++) {
1442                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1443
1444                /* Release the DMA RX socket buffers */
1445                dma_free_rx_skbufs(priv, queue);
1446
1447                /* Free DMA regions of consistent memory previously allocated */
1448                if (!priv->extend_desc)
1449                        dma_free_coherent(priv->device,
1450                                          DMA_RX_SIZE * sizeof(struct dma_desc),
1451                                          rx_q->dma_rx, rx_q->dma_rx_phy);
1452                else
1453                        dma_free_coherent(priv->device, DMA_RX_SIZE *
1454                                          sizeof(struct dma_extended_desc),
1455                                          rx_q->dma_erx, rx_q->dma_rx_phy);
1456
1457                kfree(rx_q->rx_skbuff_dma);
1458                kfree(rx_q->rx_skbuff);
1459        }
1460}
1461
1462/**
1463 * free_dma_tx_desc_resources - free TX dma desc resources
1464 * @priv: private structure
1465 */
1466static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1467{
1468        u32 tx_count = priv->plat->tx_queues_to_use;
1469        u32 queue;
1470
1471        /* Free TX queue resources */
1472        for (queue = 0; queue < tx_count; queue++) {
1473                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1474
1475                /* Release the DMA TX socket buffers */
1476                dma_free_tx_skbufs(priv, queue);
1477
1478                /* Free DMA regions of consistent memory previously allocated */
1479                if (!priv->extend_desc)
1480                        dma_free_coherent(priv->device,
1481                                          DMA_TX_SIZE * sizeof(struct dma_desc),
1482                                          tx_q->dma_tx, tx_q->dma_tx_phy);
1483                else
1484                        dma_free_coherent(priv->device, DMA_TX_SIZE *
1485                                          sizeof(struct dma_extended_desc),
1486                                          tx_q->dma_etx, tx_q->dma_tx_phy);
1487
1488                kfree(tx_q->tx_skbuff_dma);
1489                kfree(tx_q->tx_skbuff);
1490        }
1491}
1492
1493/**
1494 * alloc_dma_rx_desc_resources - alloc RX resources.
1495 * @priv: private structure
1496 * Description: according to which descriptor can be used (extended or basic)
1497 * this function allocates the resources for the RX path. For each RX queue
1498 * it allocates the buffer bookkeeping arrays and the coherent DMA
1499 * descriptor ring.
1500 */
1501static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1502{
1503        u32 rx_count = priv->plat->rx_queues_to_use;
1504        int ret = -ENOMEM;
1505        u32 queue;
1506
1507        /* RX queues buffers and DMA */
1508        for (queue = 0; queue < rx_count; queue++) {
1509                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1510
1511                rx_q->queue_index = queue;
1512                rx_q->priv_data = priv;
1513
1514                rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1515                                                    sizeof(dma_addr_t),
1516                                                    GFP_KERNEL);
1517                if (!rx_q->rx_skbuff_dma)
1518                        goto err_dma;
1519
1520                rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1521                                                sizeof(struct sk_buff *),
1522                                                GFP_KERNEL);
1523                if (!rx_q->rx_skbuff)
1524                        goto err_dma;
1525
1526                if (priv->extend_desc) {
1527                        rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1528                                                            DMA_RX_SIZE *
1529                                                            sizeof(struct
1530                                                            dma_extended_desc),
1531                                                            &rx_q->dma_rx_phy,
1532                                                            GFP_KERNEL);
1533                        if (!rx_q->dma_erx)
1534                                goto err_dma;
1535
1536                } else {
1537                        rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1538                                                           DMA_RX_SIZE *
1539                                                           sizeof(struct
1540                                                           dma_desc),
1541                                                           &rx_q->dma_rx_phy,
1542                                                           GFP_KERNEL);
1543                        if (!rx_q->dma_rx)
1544                                goto err_dma;
1545                }
1546        }
1547
1548        return 0;
1549
1550err_dma:
1551        free_dma_rx_desc_resources(priv);
1552
1553        return ret;
1554}
1555
1556/**
1557 * alloc_dma_tx_desc_resources - alloc TX resources.
1558 * @priv: private structure
1559 * Description: according to which descriptor can be used (extended or basic)
1560 * this function allocates the resources for the TX path. For each TX queue
1561 * it allocates the buffer bookkeeping arrays and the coherent DMA
1562 * descriptor ring.
1563 */
1564static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1565{
1566        u32 tx_count = priv->plat->tx_queues_to_use;
1567        int ret = -ENOMEM;
1568        u32 queue;
1569
1570        /* TX queues buffers and DMA */
1571        for (queue = 0; queue < tx_count; queue++) {
1572                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1573
1574                tx_q->queue_index = queue;
1575                tx_q->priv_data = priv;
1576
1577                tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1578                                                    sizeof(*tx_q->tx_skbuff_dma),
1579                                                    GFP_KERNEL);
1580                if (!tx_q->tx_skbuff_dma)
1581                        goto err_dma;
1582
1583                tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1584                                                sizeof(struct sk_buff *),
1585                                                GFP_KERNEL);
1586                if (!tx_q->tx_skbuff)
1587                        goto err_dma;
1588
1589                if (priv->extend_desc) {
1590                        tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1591                                                            DMA_TX_SIZE *
1592                                                            sizeof(struct
1593                                                            dma_extended_desc),
1594                                                            &tx_q->dma_tx_phy,
1595                                                            GFP_KERNEL);
1596                        if (!tx_q->dma_etx)
1597                                goto err_dma;
1598                } else {
1599                        tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1600                                                           DMA_TX_SIZE *
1601                                                           sizeof(struct
1602                                                                  dma_desc),
1603                                                           &tx_q->dma_tx_phy,
1604                                                           GFP_KERNEL);
1605                        if (!tx_q->dma_tx)
1606                                goto err_dma;
1607                }
1608        }
1609
1610        return 0;
1611
1612err_dma:
1613        free_dma_tx_desc_resources(priv);
1614
1615        return ret;
1616}
1617
1618/**
1619 * alloc_dma_desc_resources - alloc TX/RX resources.
1620 * @priv: private structure
1621 * Description: according to which descriptor can be used (extended or basic)
1622 * this function allocates the resources for the TX and RX paths. In case of
1623 * reception, for example, it pre-allocates the RX socket buffers in order to
1624 * allow the zero-copy mechanism.
1625 */
1626static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1627{
1628        /* RX Allocation */
1629        int ret = alloc_dma_rx_desc_resources(priv);
1630
1631        if (ret)
1632                return ret;
1633
1634        ret = alloc_dma_tx_desc_resources(priv);
1635
1636        return ret;
1637}
1638
1639/**
1640 * free_dma_desc_resources - free dma desc resources
1641 * @priv: private structure
1642 */
1643static void free_dma_desc_resources(struct stmmac_priv *priv)
1644{
1645        /* Release the DMA RX socket buffers */
1646        free_dma_rx_desc_resources(priv);
1647
1648        /* Release the DMA TX socket buffers */
1649        free_dma_tx_desc_resources(priv);
1650}
1651
1652/**
1653 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1654 *  @priv: driver private structure
1655 *  Description: It is used for enabling the rx queues in the MAC
1656 */
1657static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1658{
1659        u32 rx_queues_count = priv->plat->rx_queues_to_use;
1660        int queue;
1661        u8 mode;
1662
1663        for (queue = 0; queue < rx_queues_count; queue++) {
1664                mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1665                stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1666        }
1667}
1668
1669/**
1670 * stmmac_start_rx_dma - start RX DMA channel
1671 * @priv: driver private structure
1672 * @chan: RX channel index
1673 * Description:
1674 * This starts a RX DMA channel
1675 */
1676static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1677{
1678        netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1679        stmmac_start_rx(priv, priv->ioaddr, chan);
1680}
1681
1682/**
1683 * stmmac_start_tx_dma - start TX DMA channel
1684 * @priv: driver private structure
1685 * @chan: TX channel index
1686 * Description:
1687 * This starts a TX DMA channel
1688 */
1689static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1690{
1691        netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1692        stmmac_start_tx(priv, priv->ioaddr, chan);
1693}
1694
1695/**
1696 * stmmac_stop_rx_dma - stop RX DMA channel
1697 * @priv: driver private structure
1698 * @chan: RX channel index
1699 * Description:
1700 * This stops a RX DMA channel
1701 */
1702static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1703{
1704        netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1705        stmmac_stop_rx(priv, priv->ioaddr, chan);
1706}
1707
1708/**
1709 * stmmac_stop_tx_dma - stop TX DMA channel
1710 * @priv: driver private structure
1711 * @chan: TX channel index
1712 * Description:
1713 * This stops a TX DMA channel
1714 */
1715static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1716{
1717        netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1718        stmmac_stop_tx(priv, priv->ioaddr, chan);
1719}
1720
1721/**
1722 * stmmac_start_all_dma - start all RX and TX DMA channels
1723 * @priv: driver private structure
1724 * Description:
1725 * This starts all the RX and TX DMA channels
1726 */
1727static void stmmac_start_all_dma(struct stmmac_priv *priv)
1728{
1729        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1730        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1731        u32 chan = 0;
1732
1733        for (chan = 0; chan < rx_channels_count; chan++)
1734                stmmac_start_rx_dma(priv, chan);
1735
1736        for (chan = 0; chan < tx_channels_count; chan++)
1737                stmmac_start_tx_dma(priv, chan);
1738}
1739
1740/**
1741 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1742 * @priv: driver private structure
1743 * Description:
1744 * This stops the RX and TX DMA channels
1745 */
1746static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1747{
1748        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1749        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1750        u32 chan = 0;
1751
1752        for (chan = 0; chan < rx_channels_count; chan++)
1753                stmmac_stop_rx_dma(priv, chan);
1754
1755        for (chan = 0; chan < tx_channels_count; chan++)
1756                stmmac_stop_tx_dma(priv, chan);
1757}
1758
1759/**
1760 *  stmmac_dma_operation_mode - HW DMA operation mode
1761 *  @priv: driver private structure
1762 *  Description: it is used for configuring the DMA operation mode register in
1763 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1764 */
1765static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1766{
1767        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1768        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1769        int rxfifosz = priv->plat->rx_fifo_size;
1770        int txfifosz = priv->plat->tx_fifo_size;
1771        u32 txmode = 0;
1772        u32 rxmode = 0;
1773        u32 chan = 0;
1774        u8 qmode = 0;
1775
1776        if (rxfifosz == 0)
1777                rxfifosz = priv->dma_cap.rx_fifo_size;
1778        if (txfifosz == 0)
1779                txfifosz = priv->dma_cap.tx_fifo_size;
1780
1781        /* Adjust for real per queue fifo size */
1782        rxfifosz /= rx_channels_count;
1783        txfifosz /= tx_channels_count;
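        /* e.g. an 8 KiB RX FIFO shared by four RX queues leaves 2 KiB per queue */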
1784
1785        if (priv->plat->force_thresh_dma_mode) {
1786                txmode = tc;
1787                rxmode = tc;
1788        } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1789                /*
1790                 * In case of GMAC, SF mode can be enabled
1791                 * to perform the TX COE in HW. This depends on:
1792                 * 1) TX COE being actually supported;
1793                 * 2) there being no buggy Jumbo frame support
1794                 *    that requires not inserting the csum in the TDES.
1795                 */
1796                txmode = SF_DMA_MODE;
1797                rxmode = SF_DMA_MODE;
1798                priv->xstats.threshold = SF_DMA_MODE;
1799        } else {
1800                txmode = tc;
1801                rxmode = SF_DMA_MODE;
1802        }
1803
1804        /* configure all channels */
1805        for (chan = 0; chan < rx_channels_count; chan++) {
1806                qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1807
1808                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1809                                rxfifosz, qmode);
1810                stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1811                                chan);
1812        }
1813
1814        for (chan = 0; chan < tx_channels_count; chan++) {
1815                qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1816
1817                stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1818                                txfifosz, qmode);
1819        }
1820}
1821
1822/**
1823 * stmmac_tx_clean - to manage the transmission completion
1824 * @priv: driver private structure
1825 * @queue: TX queue index
1826 * Description: it reclaims the transmit resources after transmission completes.
1827 */
1828static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1829{
1830        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1831        unsigned int bytes_compl = 0, pkts_compl = 0;
1832        unsigned int entry;
1833
1834        netif_tx_lock(priv->dev);
1835
1836        priv->xstats.tx_clean++;
1837
1838        entry = tx_q->dirty_tx;
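        /* Descriptors from dirty_tx up to cur_tx have been handed to the
         * hardware; reclaim every entry the DMA has already released.
         */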
1839        while (entry != tx_q->cur_tx) {
1840                struct sk_buff *skb = tx_q->tx_skbuff[entry];
1841                struct dma_desc *p;
1842                int status;
1843
1844                if (priv->extend_desc)
1845                        p = (struct dma_desc *)(tx_q->dma_etx + entry);
1846                else
1847                        p = tx_q->dma_tx + entry;
1848
1849                status = stmmac_tx_status(priv, &priv->dev->stats,
1850                                &priv->xstats, p, priv->ioaddr);
1851                /* Check if the descriptor is owned by the DMA */
1852                if (unlikely(status & tx_dma_own))
1853                        break;
1854
1855                /* Make sure descriptor fields are read after reading
1856                 * the own bit.
1857                 */
1858                dma_rmb();
1859
1860                /* Just consider the last segment and ...*/
1861                if (likely(!(status & tx_not_ls))) {
1862                        /* ... verify the status error condition */
1863                        if (unlikely(status & tx_err)) {
1864                                priv->dev->stats.tx_errors++;
1865                        } else {
1866                                priv->dev->stats.tx_packets++;
1867                                priv->xstats.tx_pkt_n++;
1868                        }
1869                        stmmac_get_tx_hwtstamp(priv, p, skb);
1870                }
1871
1872                if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1873                        if (tx_q->tx_skbuff_dma[entry].map_as_page)
1874                                dma_unmap_page(priv->device,
1875                                               tx_q->tx_skbuff_dma[entry].buf,
1876                                               tx_q->tx_skbuff_dma[entry].len,
1877                                               DMA_TO_DEVICE);
1878                        else
1879                                dma_unmap_single(priv->device,
1880                                                 tx_q->tx_skbuff_dma[entry].buf,
1881                                                 tx_q->tx_skbuff_dma[entry].len,
1882                                                 DMA_TO_DEVICE);
1883                        tx_q->tx_skbuff_dma[entry].buf = 0;
1884                        tx_q->tx_skbuff_dma[entry].len = 0;
1885                        tx_q->tx_skbuff_dma[entry].map_as_page = false;
1886                }
1887
1888                stmmac_clean_desc3(priv, tx_q, p);
1889
1890                tx_q->tx_skbuff_dma[entry].last_segment = false;
1891                tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1892
1893                if (likely(skb != NULL)) {
1894                        pkts_compl++;
1895                        bytes_compl += skb->len;
1896                        dev_consume_skb_any(skb);
1897                        tx_q->tx_skbuff[entry] = NULL;
1898                }
1899
1900                stmmac_release_tx_desc(priv, p, priv->mode);
1901
1902                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1903        }
1904        tx_q->dirty_tx = entry;
1905
1906        netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1907                                  pkts_compl, bytes_compl);
1908
1909        if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1910                                                                queue))) &&
1911            stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1912
1913                netif_dbg(priv, tx_done, priv->dev,
1914                          "%s: restart transmit\n", __func__);
1915                netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1916        }
1917
1918        if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1919                stmmac_enable_eee_mode(priv);
1920                mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1921        }
1922        netif_tx_unlock(priv->dev);
1923}
1924
1925/**
1926 * stmmac_tx_err - to manage the tx error
1927 * @priv: driver private structure
1928 * @chan: channel index
1929 * Description: it cleans the descriptors and restarts the transmission
1930 * in case of transmission errors.
1931 */
1932static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1933{
1934        struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1935        int i;
1936
1937        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1938
1939        stmmac_stop_tx_dma(priv, chan);
1940        dma_free_tx_skbufs(priv, chan);
1941        for (i = 0; i < DMA_TX_SIZE; i++)
1942                if (priv->extend_desc)
1943                        stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1944                                        priv->mode, (i == DMA_TX_SIZE - 1));
1945                else
1946                        stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1947                                        priv->mode, (i == DMA_TX_SIZE - 1));
1948        tx_q->dirty_tx = 0;
1949        tx_q->cur_tx = 0;
1950        tx_q->mss = 0;
1951        netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1952        stmmac_start_tx_dma(priv, chan);
1953
1954        priv->dev->stats.tx_errors++;
1955        netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1956}
1957
1958/**
1959 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1960 *  @priv: driver private structure
1961 *  @txmode: TX operating mode
1962 *  @rxmode: RX operating mode
1963 *  @chan: channel index
1964 *  Description: it is used for configuring the DMA operation mode at
1965 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1966 *  mode.
1967 */
1968static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1969                                          u32 rxmode, u32 chan)
1970{
1971        u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1972        u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1973        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1974        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1975        int rxfifosz = priv->plat->rx_fifo_size;
1976        int txfifosz = priv->plat->tx_fifo_size;
1977
1978        if (rxfifosz == 0)
1979                rxfifosz = priv->dma_cap.rx_fifo_size;
1980        if (txfifosz == 0)
1981                txfifosz = priv->dma_cap.tx_fifo_size;
1982
1983        /* Adjust for real per queue fifo size */
1984        rxfifosz /= rx_channels_count;
1985        txfifosz /= tx_channels_count;
1986
1987        stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
1988        stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
1989}
1990
1991static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
1992{
1993        int ret;
1994
1995        ret = stmmac_safety_feat_irq_status(priv, priv->dev,
1996                        priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
1997        if (ret && (ret != -EINVAL)) {
1998                stmmac_global_err(priv);
1999                return true;
2000        }
2001
2002        return false;
2003}
2004
2005/**
2006 * stmmac_dma_interrupt - DMA ISR
2007 * @priv: driver private structure
2008 * Description: this is the DMA ISR. It is called by the main ISR.
2009 * It calls the dwmac dma routine and schedules the poll method in case
2010 * some work can be done.
2011 */
2012static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2013{
2014        u32 tx_channel_count = priv->plat->tx_queues_to_use;
2015        u32 rx_channel_count = priv->plat->rx_queues_to_use;
2016        u32 channels_to_check = tx_channel_count > rx_channel_count ?
2017                                tx_channel_count : rx_channel_count;
2018        u32 chan;
2019        bool poll_scheduled = false;
2020        int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2021
2022        /* Make sure we never check beyond our status buffer. */
2023        if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2024                channels_to_check = ARRAY_SIZE(status);
2025
2026        /* Each DMA channel can be used for rx and tx simultaneously, yet
2027         * napi_struct is embedded in struct stmmac_rx_queue rather than in a
2028         * stmmac_channel struct.
2029         * Because of this, stmmac_poll currently checks (and possibly wakes)
2030         * all tx queues rather than just a single tx queue.
2031         */
2032        for (chan = 0; chan < channels_to_check; chan++)
2033                status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2034                                &priv->xstats, chan);
2035
2036        for (chan = 0; chan < rx_channel_count; chan++) {
2037                if (likely(status[chan] & handle_rx)) {
2038                        struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2039
2040                        if (likely(napi_schedule_prep(&rx_q->napi))) {
2041                                stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2042                                __napi_schedule(&rx_q->napi);
2043                                poll_scheduled = true;
2044                        }
2045                }
2046        }
2047
2048        /* If we scheduled poll, we already know that tx queues will be checked.
2049         * If we didn't schedule poll, see if any DMA channel (used by tx) has a
2050         * completed transmission, if so, call stmmac_poll (once).
2051         */
2052        if (!poll_scheduled) {
2053                for (chan = 0; chan < tx_channel_count; chan++) {
2054                        if (status[chan] & handle_tx) {
2055                                /* It doesn't matter what rx queue we choose
2056                                 * here. We use 0 since it always exists.
2057                                 */
2058                                struct stmmac_rx_queue *rx_q =
2059                                        &priv->rx_queue[0];
2060
2061                                if (likely(napi_schedule_prep(&rx_q->napi))) {
2062                                        stmmac_disable_dma_irq(priv,
2063                                                        priv->ioaddr, chan);
2064                                        __napi_schedule(&rx_q->napi);
2065                                }
2066                                break;
2067                        }
2068                }
2069        }
2070
2071        for (chan = 0; chan < tx_channel_count; chan++) {
2072                if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2073                        /* Try to bump up the dma threshold on this failure */
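                        /* tc grows in steps of 64 until it passes 256; channels
                         * already in Store-And-Forward mode are left alone.
                         */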
2074                        if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2075                            (tc <= 256)) {
2076                                tc += 64;
2077                                if (priv->plat->force_thresh_dma_mode)
2078                                        stmmac_set_dma_operation_mode(priv,
2079                                                                      tc,
2080                                                                      tc,
2081                                                                      chan);
2082                                else
2083                                        stmmac_set_dma_operation_mode(priv,
2084                                                                    tc,
2085                                                                    SF_DMA_MODE,
2086                                                                    chan);
2087                                priv->xstats.threshold = tc;
2088                        }
2089                } else if (unlikely(status[chan] == tx_hard_error)) {
2090                        stmmac_tx_err(priv, chan);
2091                }
2092        }
2093}
2094
2095/**
2096 * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
2097 * @priv: driver private structure
2098 * Description: this masks the MMC irq since the counters are managed in SW.
2099 */
2100static void stmmac_mmc_setup(struct stmmac_priv *priv)
2101{
2102        unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2103                            MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2104
2105        dwmac_mmc_intr_all_mask(priv->mmcaddr);
2106
2107        if (priv->dma_cap.rmon) {
2108                dwmac_mmc_ctrl(priv->mmcaddr, mode);
2109                memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2110        } else
2111                netdev_info(priv->dev, "No MAC Management Counters available\n");
2112}
2113
2114/**
2115 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2116 * @priv: driver private structure
2117 * Description:
2118 *  newer GMAC chip generations have a register that indicates the
2119 *  presence of the optional features/functions.
2120 *  It can also be used to override the values passed through the
2121 *  platform, which is necessary for old MAC10/100 and GMAC chips.
2122 */
2123static int stmmac_get_hw_features(struct stmmac_priv *priv)
2124{
2125        return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2126}
2127
2128/**
2129 * stmmac_check_ether_addr - check if the MAC addr is valid
2130 * @priv: driver private structure
2131 * Description:
2132 * it verifies that the MAC address is valid; in case of failure it
2133 * generates a random MAC address
2134 */
2135static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2136{
2137        if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2138                stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2139                if (!is_valid_ether_addr(priv->dev->dev_addr))
2140                        eth_hw_addr_random(priv->dev);
2141                netdev_info(priv->dev, "device MAC address %pM\n",
2142                            priv->dev->dev_addr);
2143        }
2144}
2145
2146/**
2147 * stmmac_init_dma_engine - DMA init.
2148 * @priv: driver private structure
2149 * Description:
2150 * It inits the DMA invoking the specific MAC/GMAC callback.
2151 * Some DMA parameters can be passed from the platform;
2152 * in case they are not passed, a default is kept for the MAC or GMAC.
2153 */
2154static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2155{
2156        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2157        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2158        u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2159        struct stmmac_rx_queue *rx_q;
2160        struct stmmac_tx_queue *tx_q;
2161        u32 chan = 0;
2162        int atds = 0;
2163        int ret = 0;
2164
2165        if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2166                dev_err(priv->device, "Invalid DMA configuration\n");
2167                return -EINVAL;
2168        }
2169
2170        if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2171                atds = 1;
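        /* Extended descriptors are twice the size of the basic ones, so in
         * ring mode the DMA has to be told to use the alternate descriptor
         * size (atds).
         */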
2172
2173        ret = stmmac_reset(priv, priv->ioaddr);
2174        if (ret) {
2175                dev_err(priv->device, "Failed to reset the dma\n");
2176                return ret;
2177        }
2178
2179        /* DMA RX Channel Configuration */
2180        for (chan = 0; chan < rx_channels_count; chan++) {
2181                rx_q = &priv->rx_queue[chan];
2182
2183                stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2184                                    rx_q->dma_rx_phy, chan);
2185
2186                rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2187                            (DMA_RX_SIZE * sizeof(struct dma_desc));
2188                stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2189                                       rx_q->rx_tail_addr, chan);
2190        }
2191
2192        /* DMA TX Channel Configuration */
2193        for (chan = 0; chan < tx_channels_count; chan++) {
2194                tx_q = &priv->tx_queue[chan];
2195
2196                stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2197                                    tx_q->dma_tx_phy, chan);
2198
2199                tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2200                            (DMA_TX_SIZE * sizeof(struct dma_desc));
2201                stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2202                                       tx_q->tx_tail_addr, chan);
2203        }
2204
2205        /* DMA CSR Channel configuration */
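        /* The common CSR registers are programmed once per channel index,
         * covering whichever of RX/TX uses more channels.
         */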
2206        for (chan = 0; chan < dma_csr_ch; chan++)
2207                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2208
2209        /* DMA Configuration */
2210        stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2211
2212        if (priv->plat->axi)
2213                stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2214
2215        return ret;
2216}
2217
2218/**
2219 * stmmac_tx_timer - mitigation sw timer for tx.
2220 * @t: timer_list pointer
2221 * Description:
2222 * This is the timer handler to directly invoke the stmmac_tx_clean.
2223 */
2224static void stmmac_tx_timer(struct timer_list *t)
2225{
2226        struct stmmac_priv *priv = from_timer(priv, t, txtimer);
2227        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2228        u32 queue;
2229
2230        /* let's scan all the tx queues */
2231        for (queue = 0; queue < tx_queues_count; queue++)
2232                stmmac_tx_clean(priv, queue);
2233}
2234
2235/**
2236 * stmmac_init_tx_coalesce - init tx mitigation options.
2237 * @priv: driver private structure
2238 * Description:
2239 * This inits the transmit coalesce parameters: i.e. timer rate,
2240 * timer handler and default threshold used for enabling the
2241 * interrupt on completion bit.
2242 */
2243static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2244{
2245        priv->tx_coal_frames = STMMAC_TX_FRAMES;
2246        priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
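        /* The xmit path only requests a completion interrupt every
         * tx_coal_frames packets; the timer armed below reclaims whatever
         * completes in between.
         */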
2247        timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
2248        priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2249        add_timer(&priv->txtimer);
2250}
2251
2252static void stmmac_set_rings_length(struct stmmac_priv *priv)
2253{
2254        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2255        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2256        u32 chan;
2257
2258        /* set TX ring length */
2259        for (chan = 0; chan < tx_channels_count; chan++)
2260                stmmac_set_tx_ring_len(priv, priv->ioaddr,
2261                                (DMA_TX_SIZE - 1), chan);
2262
2263        /* set RX ring length */
2264        for (chan = 0; chan < rx_channels_count; chan++)
2265                stmmac_set_rx_ring_len(priv, priv->ioaddr,
2266                                (DMA_RX_SIZE - 1), chan);
2267}
2268
2269/**
2270 *  stmmac_set_tx_queue_weight - Set TX queue weight
2271 *  @priv: driver private structure
2272 *  Description: It is used for setting the TX queue weights
2273 */
2274static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2275{
2276        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2277        u32 weight;
2278        u32 queue;
2279
2280        for (queue = 0; queue < tx_queues_count; queue++) {
2281                weight = priv->plat->tx_queues_cfg[queue].weight;
2282                stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2283        }
2284}
2285
2286/**
2287 *  stmmac_configure_cbs - Configure CBS in TX queue
2288 *  @priv: driver private structure
2289 *  Description: It is used for configuring CBS in AVB TX queues
2290 */
2291static void stmmac_configure_cbs(struct stmmac_priv *priv)
2292{
2293        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2294        u32 mode_to_use;
2295        u32 queue;
2296
2297        /* queue 0 is reserved for legacy traffic */
2298        for (queue = 1; queue < tx_queues_count; queue++) {
2299                mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2300                if (mode_to_use == MTL_QUEUE_DCB)
2301                        continue;
2302
2303                stmmac_config_cbs(priv, priv->hw,
2304                                priv->plat->tx_queues_cfg[queue].send_slope,
2305                                priv->plat->tx_queues_cfg[queue].idle_slope,
2306                                priv->plat->tx_queues_cfg[queue].high_credit,
2307                                priv->plat->tx_queues_cfg[queue].low_credit,
2308                                queue);
2309        }
2310}
2311
2312/**
2313 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2314 *  @priv: driver private structure
2315 *  Description: It is used for mapping RX queues to RX dma channels
2316 */
2317static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2318{
2319        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2320        u32 queue;
2321        u32 chan;
2322
2323        for (queue = 0; queue < rx_queues_count; queue++) {
2324                chan = priv->plat->rx_queues_cfg[queue].chan;
2325                stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2326        }
2327}
2328
2329/**
2330 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2331 *  @priv: driver private structure
2332 *  Description: It is used for configuring the RX Queue Priority
2333 */
2334static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2335{
2336        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2337        u32 queue;
2338        u32 prio;
2339
2340        for (queue = 0; queue < rx_queues_count; queue++) {
2341                if (!priv->plat->rx_queues_cfg[queue].use_prio)
2342                        continue;
2343
2344                prio = priv->plat->rx_queues_cfg[queue].prio;
2345                stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2346        }
2347}
2348
2349/**
2350 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2351 *  @priv: driver private structure
2352 *  Description: It is used for configuring the TX Queue Priority
2353 */
2354static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2355{
2356        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2357        u32 queue;
2358        u32 prio;
2359
2360        for (queue = 0; queue < tx_queues_count; queue++) {
2361                if (!priv->plat->tx_queues_cfg[queue].use_prio)
2362                        continue;
2363
2364                prio = priv->plat->tx_queues_cfg[queue].prio;
2365                stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2366        }
2367}
2368
2369/**
2370 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2371 *  @priv: driver private structure
2372 *  Description: It is used for configuring the RX queue routing
2373 */
2374static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2375{
2376        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2377        u32 queue;
2378        u8 packet;
2379
2380        for (queue = 0; queue < rx_queues_count; queue++) {
2381                /* no specific packet type routing specified for the queue */
2382                if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2383                        continue;
2384
2385                packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2386                stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2387        }
2388}
2389
2390/**
2391 *  stmmac_mtl_configuration - Configure MTL
2392 *  @priv: driver private structure
2393 *  Description: It is used for configuring MTL
2394 */
2395static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2396{
2397        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2398        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2399
2400        if (tx_queues_count > 1)
2401                stmmac_set_tx_queue_weight(priv);
2402
2403        /* Configure MTL RX algorithms */
2404        if (rx_queues_count > 1)
2405                stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2406                                priv->plat->rx_sched_algorithm);
2407
2408        /* Configure MTL TX algorithms */
2409        if (tx_queues_count > 1)
2410                stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2411                                priv->plat->tx_sched_algorithm);
2412
2413        /* Configure CBS in AVB TX queues */
2414        if (tx_queues_count > 1)
2415                stmmac_configure_cbs(priv);
2416
2417        /* Map RX MTL to DMA channels */
2418        stmmac_rx_queue_dma_chan_map(priv);
2419
2420        /* Enable MAC RX Queues */
2421        stmmac_mac_enable_rx_queues(priv);
2422
2423        /* Set RX priorities */
2424        if (rx_queues_count > 1)
2425                stmmac_mac_config_rx_queues_prio(priv);
2426
2427        /* Set TX priorities */
2428        if (tx_queues_count > 1)
2429                stmmac_mac_config_tx_queues_prio(priv);
2430
2431        /* Set RX routing */
2432        if (rx_queues_count > 1)
2433                stmmac_mac_config_rx_queues_routing(priv);
2434}
2435
2436static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2437{
2438        if (priv->dma_cap.asp) {
2439                netdev_info(priv->dev, "Enabling Safety Features\n");
2440                stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2441        } else {
2442                netdev_info(priv->dev, "No Safety Features support found\n");
2443        }
2444}
2445
2446/**
2447 * stmmac_hw_setup - setup mac in a usable state.
2448 *  @dev : pointer to the device structure.
2449 *  Description:
2450 *  this is the main function to setup the HW in a usable state: the
2451 *  dma engine is reset, the core registers are configured (e.g. AXI,
2452 *  Checksum features, timers) and the DMA is ready to start receiving
2453 *  and transmitting.
2454 *  Return value:
2455 *  0 on success and an appropriate (-)ve integer as defined in errno.h
2456 *  file on failure.
2457 */
2458static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2459{
2460        struct stmmac_priv *priv = netdev_priv(dev);
2461        u32 rx_cnt = priv->plat->rx_queues_to_use;
2462        u32 tx_cnt = priv->plat->tx_queues_to_use;
2463        u32 chan;
2464        int ret;
2465
2466        /* DMA initialization and SW reset */
2467        ret = stmmac_init_dma_engine(priv);
2468        if (ret < 0) {
2469                netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2470                           __func__);
2471                return ret;
2472        }
2473
2474        /* Copy the MAC addr into the HW  */
2475        stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2476
2477        /* PS and related bits will be programmed according to the speed */
2478        if (priv->hw->pcs) {
2479                int speed = priv->plat->mac_port_sel_speed;
2480
2481                if ((speed == SPEED_10) || (speed == SPEED_100) ||
2482                    (speed == SPEED_1000)) {
2483                        priv->hw->ps = speed;
2484                } else {
2485                        dev_warn(priv->device, "invalid port speed\n");
2486                        priv->hw->ps = 0;
2487                }
2488        }
2489
2490        /* Initialize the MAC Core */
2491        stmmac_core_init(priv, priv->hw, dev);
2492
2493        /* Initialize MTL*/
2494        stmmac_mtl_configuration(priv);
2495
2496        /* Initialize Safety Features */
2497        stmmac_safety_feat_configuration(priv);
2498
2499        ret = stmmac_rx_ipc(priv, priv->hw);
2500        if (!ret) {
2501                netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2502                priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2503                priv->hw->rx_csum = 0;
2504        }
2505
2506        /* Enable the MAC Rx/Tx */
2507        stmmac_mac_set(priv, priv->ioaddr, true);
2508
2509        /* Set the HW DMA mode and the COE */
2510        stmmac_dma_operation_mode(priv);
2511
2512        stmmac_mmc_setup(priv);
2513
2514        if (init_ptp) {
2515                ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2516                if (ret < 0)
2517                        netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2518
2519                ret = stmmac_init_ptp(priv);
2520                if (ret == -EOPNOTSUPP)
2521                        netdev_warn(priv->dev, "PTP not supported by HW\n");
2522                else if (ret)
2523                        netdev_warn(priv->dev, "PTP init failed\n");
2524        }
2525
2526#ifdef CONFIG_DEBUG_FS
2527        ret = stmmac_init_fs(dev);
2528        if (ret < 0)
2529                netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2530                            __func__);
2531#endif
2532        /* Start the ball rolling... */
2533        stmmac_start_all_dma(priv);
2534
2535        priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2536
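        /* If the RX interrupt watchdog is in use, program it to its maximum
         * value to mitigate RX interrupts.
         */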
2537        if (priv->use_riwt) {
2538                ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2539                if (!ret)
2540                        priv->rx_riwt = MAX_DMA_RIWT;
2541        }
2542
2543        if (priv->hw->pcs)
2544                stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2545
2546        /* set TX and RX rings length */
2547        stmmac_set_rings_length(priv);
2548
2549        /* Enable TSO */
2550        if (priv->tso) {
2551                for (chan = 0; chan < tx_cnt; chan++)
2552                        stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2553        }
2554
2555        return 0;
2556}
2557
2558static void stmmac_hw_teardown(struct net_device *dev)
2559{
2560        struct stmmac_priv *priv = netdev_priv(dev);
2561
2562        clk_disable_unprepare(priv->plat->clk_ptp_ref);
2563}
2564
2565/**
2566 *  stmmac_open - open entry point of the driver
2567 *  @dev : pointer to the device structure.
2568 *  Description:
2569 *  This function is the open entry point of the driver.
2570 *  Return value:
2571 *  0 on success and an appropriate (-)ve integer as defined in errno.h
2572 *  file on failure.
2573 */
2574static int stmmac_open(struct net_device *dev)
2575{
2576        struct stmmac_priv *priv = netdev_priv(dev);
2577        int ret;
2578
2579        stmmac_check_ether_addr(priv);
2580
2581        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2582            priv->hw->pcs != STMMAC_PCS_TBI &&
2583            priv->hw->pcs != STMMAC_PCS_RTBI) {
2584                ret = stmmac_init_phy(dev);
2585                if (ret) {
2586                        netdev_err(priv->dev,
2587                                   "%s: Cannot attach to PHY (error: %d)\n",
2588                                   __func__, ret);
2589                        return ret;
2590                }
2591        }
2592
2593        /* Extra statistics */
2594        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2595        priv->xstats.threshold = tc;
2596
2597        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2598        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2599
2600        ret = alloc_dma_desc_resources(priv);
2601        if (ret < 0) {
2602                netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2603                           __func__);
2604                goto dma_desc_error;
2605        }
2606
2607        ret = init_dma_desc_rings(dev, GFP_KERNEL);
2608        if (ret < 0) {
2609                netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2610                           __func__);
2611                goto init_error;
2612        }
2613
2614        ret = stmmac_hw_setup(dev, true);
2615        if (ret < 0) {
2616                netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2617                goto init_error;
2618        }
2619
2620        stmmac_init_tx_coalesce(priv);
2621
2622        if (dev->phydev)
2623                phy_start(dev->phydev);
2624
2625        /* Request the IRQ lines */
2626        ret = request_irq(dev->irq, stmmac_interrupt,
2627                          IRQF_SHARED, dev->name, dev);
2628        if (unlikely(ret < 0)) {
2629                netdev_err(priv->dev,
2630                           "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2631                           __func__, dev->irq, ret);
2632                goto irq_error;
2633        }
2634
2635        /* Request the Wake IRQ in case another line is used for WoL */
2636        if (priv->wol_irq != dev->irq) {
2637                ret = request_irq(priv->wol_irq, stmmac_interrupt,
2638                                  IRQF_SHARED, dev->name, dev);
2639                if (unlikely(ret < 0)) {
2640                        netdev_err(priv->dev,
2641                                   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2642                                   __func__, priv->wol_irq, ret);
2643                        goto wolirq_error;
2644                }
2645        }
2646
2647        /* Request the LPI IRQ in case a separate line is used */
2648        if (priv->lpi_irq > 0) {
2649                ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2650                                  dev->name, dev);
2651                if (unlikely(ret < 0)) {
2652                        netdev_err(priv->dev,
2653                                   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2654                                   __func__, priv->lpi_irq, ret);
2655                        goto lpiirq_error;
2656                }
2657        }
2658
2659        stmmac_enable_all_queues(priv);
2660        stmmac_start_all_queues(priv);
2661
2662        return 0;
2663
2664lpiirq_error:
2665        if (priv->wol_irq != dev->irq)
2666                free_irq(priv->wol_irq, dev);
2667wolirq_error:
2668        free_irq(dev->irq, dev);
2669irq_error:
2670        if (dev->phydev)
2671                phy_stop(dev->phydev);
2672
2673        del_timer_sync(&priv->txtimer);
2674        stmmac_hw_teardown(dev);
2675init_error:
2676        free_dma_desc_resources(priv);
2677dma_desc_error:
2678        if (dev->phydev)
2679                phy_disconnect(dev->phydev);
2680
2681        return ret;
2682}
2683
2684/**
2685 *  stmmac_release - close entry point of the driver
2686 *  @dev : device pointer.
2687 *  Description:
2688 *  This is the stop entry point of the driver.
2689 */
2690static int stmmac_release(struct net_device *dev)
2691{
2692        struct stmmac_priv *priv = netdev_priv(dev);
2693
2694        if (priv->eee_enabled)
2695                del_timer_sync(&priv->eee_ctrl_timer);
2696
2697        /* Stop and disconnect the PHY */
2698        if (dev->phydev) {
2699                phy_stop(dev->phydev);
2700                phy_disconnect(dev->phydev);
2701        }
2702
2703        stmmac_stop_all_queues(priv);
2704
2705        stmmac_disable_all_queues(priv);
2706
2707        del_timer_sync(&priv->txtimer);
2708
2709        /* Free the IRQ lines */
2710        free_irq(dev->irq, dev);
2711        if (priv->wol_irq != dev->irq)
2712                free_irq(priv->wol_irq, dev);
2713        if (priv->lpi_irq > 0)
2714                free_irq(priv->lpi_irq, dev);
2715
2716        /* Stop TX/RX DMA and clear the descriptors */
2717        stmmac_stop_all_dma(priv);
2718
2719        /* Release and free the Rx/Tx resources */
2720        free_dma_desc_resources(priv);
2721
2722        /* Disable the MAC Rx/Tx */
2723        stmmac_mac_set(priv, priv->ioaddr, false);
2724
2725        netif_carrier_off(dev);
2726
2727#ifdef CONFIG_DEBUG_FS
2728        stmmac_exit_fs(dev);
2729#endif
2730
2731        stmmac_release_ptp(priv);
2732
2733        return 0;
2734}
2735
2736/**
2737 *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
2738 *  @priv: driver private structure
2739 *  @des: buffer start address
2740 *  @total_len: total length to fill in descriptors
2741 *  @last_segment: condition for the last descriptor
2742 *  @queue: TX queue index
2743 *  Description:
2744 *  This function fills descriptors and requests new descriptors according
2745 *  to the buffer length to fill
2746 */
2747static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2748                                 int total_len, bool last_segment, u32 queue)
2749{
2750        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2751        struct dma_desc *desc;
2752        u32 buff_size;
2753        int tmp_len;
2754
2755        tmp_len = total_len;
2756
2757        while (tmp_len > 0) {
2758                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2759                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2760                desc = tx_q->dma_tx + tx_q->cur_tx;
2761
2762                desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2763                buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2764                            TSO_MAX_BUFF_SIZE : tmp_len;
2765
2766                stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2767                                0, 1,
2768                                (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2769                                0, 0);
2770
2771                tmp_len -= TSO_MAX_BUFF_SIZE;
2772        }
2773}
2774
2775/**
2776 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2777 *  @skb : the socket buffer
2778 *  @dev : device pointer
2779 *  Description: this is the transmit function that is called on TSO frames
2780 *  (support available on GMAC4 and newer chips).
2781 *  The diagram below shows the ring programming in case of TSO frames:
2782 *
2783 *  First Descriptor
2784 *   --------
2785 *   | DES0 |---> buffer1 = L2/L3/L4 header
2786 *   | DES1 |---> TCP Payload (can continue on next descr...)
2787 *   | DES2 |---> buffer 1 and 2 len
2788 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2789 *   --------
2790 *      |
2791 *     ...
2792 *      |
2793 *   --------
2794 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2795 *   | DES1 | --|
2796 *   | DES2 | --> buffer 1 and 2 len
2797 *   | DES3 |
2798 *   --------
2799 *
2800 * mss is fixed when TSO is enabled, so the TDES3 ctx field does not need
 * to be programmed for every frame.
2801 */
2802static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2803{
2804        struct dma_desc *desc, *first, *mss_desc = NULL;
2805        struct stmmac_priv *priv = netdev_priv(dev);
2806        int nfrags = skb_shinfo(skb)->nr_frags;
2807        u32 queue = skb_get_queue_mapping(skb);
2808        unsigned int first_entry, des;
2809        struct stmmac_tx_queue *tx_q;
2810        int tmp_pay_len = 0;
2811        u32 pay_len, mss;
2812        u8 proto_hdr_len;
2813        int i;
2814
2815        tx_q = &priv->tx_queue[queue];
2816
2817        /* Compute header lengths */
2818        proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2819
2820        /* Desc availability based on threshold should be safe enough */
2821        if (unlikely(stmmac_tx_avail(priv, queue) <
2822                (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2823                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2824                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2825                                                                queue));
2826                        /* This is a hard error, log it. */
2827                        netdev_err(priv->dev,
2828                                   "%s: Tx Ring full when queue awake\n",
2829                                   __func__);
2830                }
2831                return NETDEV_TX_BUSY;
2832        }
2833
2834        pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2835
2836        mss = skb_shinfo(skb)->gso_size;
2837
2838        /* set new MSS value if needed */
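        /* The new MSS travels in a context descriptor of its own, so one ring
         * entry is consumed ahead of the first data descriptor.
         */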
2839        if (mss != tx_q->mss) {
2840                mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2841                stmmac_set_mss(priv, mss_desc, mss);
2842                tx_q->mss = mss;
2843                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2844                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2845        }
2846
2847        if (netif_msg_tx_queued(priv)) {
2848                pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2849                        __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2850                pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2851                        skb->data_len);
2852        }
2853
2854        first_entry = tx_q->cur_tx;
2855        WARN_ON(tx_q->tx_skbuff[first_entry]);
2856
2857        desc = tx_q->dma_tx + first_entry;
2858        first = desc;
2859
2860        /* first descriptor: fill Headers on Buf1 */
2861        des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2862                             DMA_TO_DEVICE);
2863        if (dma_mapping_error(priv->device, des))
2864                goto dma_map_err;
2865
2866        tx_q->tx_skbuff_dma[first_entry].buf = des;
2867        tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2868
2869        first->des0 = cpu_to_le32(des);
2870
2871        /* Fill start of payload in buff2 of first descriptor */
2872        if (pay_len)
2873                first->des1 = cpu_to_le32(des + proto_hdr_len);
2874
2875        /* If needed take extra descriptors to fill the remaining payload */
2876        tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2877
2878        stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2879
2880        /* Prepare fragments */
2881        for (i = 0; i < nfrags; i++) {
2882                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2883
2884                des = skb_frag_dma_map(priv->device, frag, 0,
2885                                       skb_frag_size(frag),
2886                                       DMA_TO_DEVICE);
2887                if (dma_mapping_error(priv->device, des))
2888                        goto dma_map_err;
2889
2890                stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2891                                     (i == nfrags - 1), queue);
2892
2893                tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2894                tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2895                tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2896        }
2897
2898        tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2899
2900        /* Only the last descriptor gets to point to the skb. */
2901        tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2902
2903        /* We've used all descriptors we need for this skb, however,
2904         * advance cur_tx so that it references a fresh descriptor.
2905         * ndo_start_xmit will fill this descriptor the next time it's
2906         * called and stmmac_tx_clean may clean up to this descriptor.
2907         */
2908        tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2909
2910        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2911                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2912                          __func__);
2913                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2914        }
2915
2916        dev->stats.tx_bytes += skb->len;
2917        priv->xstats.tx_tso_frames++;
2918        priv->xstats.tx_tso_nfrags += nfrags;
2919
2920        /* Manage tx mitigation */
2921        priv->tx_count_frames += nfrags + 1;
2922        if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2923                mod_timer(&priv->txtimer,
2924                          STMMAC_COAL_TIMER(priv->tx_coal_timer));
2925        } else {
2926                priv->tx_count_frames = 0;
2927                stmmac_set_tx_ic(priv, desc);
2928                priv->xstats.tx_set_ic_bit++;
2929        }
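        /* Tx interrupt mitigation: the IC (Interrupt on Completion) bit is only
         * set once every tx_coal_frames frames; in between, the coalescing
         * timer is (re)armed as a fallback so completed descriptors still get
         * cleaned even when the frame counter never reaches the threshold.
         */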
2930
2931        skb_tx_timestamp(skb);
2932
2933        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2934                     priv->hwts_tx_en)) {
2935                /* declare that device is doing timestamping */
2936                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2937                stmmac_enable_tx_timestamp(priv, first);
2938        }
2939
2940        /* Complete the first descriptor before granting the DMA */
2941        stmmac_prepare_tso_tx_desc(priv, first, 1,
2942                        proto_hdr_len,
2943                        pay_len,
2944                        1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2945                        tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2946
2947        /* If context desc is used to change MSS */
2948        if (mss_desc) {
2949                /* Make sure that the first descriptor has been completely
2950                 * written, including its own bit. This is because the MSS
2951                 * context descriptor sits before the first descriptor, so we
2952                 * need to make sure that its own bit is the last thing written.
2953                 */
2954                dma_wmb();
2955                stmmac_set_tx_owner(priv, mss_desc);
2956        }
2957
2958        /* The own bit must be the latest setting done when preparing the
2959         * descriptor, and then a barrier is needed to make sure that
2960         * all is coherent before granting the DMA engine.
2961         */
2962        wmb();
2963
2964        if (netif_msg_pktdata(priv)) {
2965                pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2966                        __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2967                        tx_q->cur_tx, first, nfrags);
2968
2969                stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2970
2971                pr_info(">>> frame to be transmitted: ");
2972                print_pkt(skb->data, skb_headlen(skb));
2973        }
2974
2975        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2976
2977        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
2978
2979        return NETDEV_TX_OK;
2980
2981dma_map_err:
2982        dev_err(priv->device, "Tx dma map failed\n");
2983        dev_kfree_skb(skb);
2984        priv->dev->stats.tx_dropped++;
2985        return NETDEV_TX_OK;
2986}
2987
2988/**
2989 *  stmmac_xmit - Tx entry point of the driver
2990 *  @skb : the socket buffer
2991 *  @dev : device pointer
2992 *  Description : this is the tx entry point of the driver.
2993 *  It programs the chain or the ring and supports oversized frames
2994 *  and the SG feature.
2995 */
2996static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2997{
2998        struct stmmac_priv *priv = netdev_priv(dev);
2999        unsigned int nopaged_len = skb_headlen(skb);
3000        int i, csum_insertion = 0, is_jumbo = 0;
3001        u32 queue = skb_get_queue_mapping(skb);
3002        int nfrags = skb_shinfo(skb)->nr_frags;
3003        int entry;
3004        unsigned int first_entry;
3005        struct dma_desc *desc, *first;
3006        struct stmmac_tx_queue *tx_q;
3007        unsigned int enh_desc;
3008        unsigned int des;
3009
3010        tx_q = &priv->tx_queue[queue];
3011
3012        /* Manage oversized TCP frames for GMAC4 device */
3013        if (skb_is_gso(skb) && priv->tso) {
3014                if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3015                        return stmmac_tso_xmit(skb, dev);
3016        }
3017
3018        if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3019                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3020                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3021                                                                queue));
3022                        /* This is a hard error, log it. */
3023                        netdev_err(priv->dev,
3024                                   "%s: Tx Ring full when queue awake\n",
3025                                   __func__);
3026                }
3027                return NETDEV_TX_BUSY;
3028        }
3029
3030        if (priv->tx_path_in_lpi_mode)
3031                stmmac_disable_eee_mode(priv);
3032
3033        entry = tx_q->cur_tx;
3034        first_entry = entry;
3035        WARN_ON(tx_q->tx_skbuff[first_entry]);
3036
3037        csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3038
3039        if (likely(priv->extend_desc))
3040                desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3041        else
3042                desc = tx_q->dma_tx + entry;
3043
3044        first = desc;
3045
3046        enh_desc = priv->plat->enh_desc;
3047        /* To program the descriptors according to the size of the frame */
3048        if (enh_desc)
3049                is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3050
3051        if (unlikely(is_jumbo)) {
3052                entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3053                if (unlikely(entry < 0) && (entry != -EINVAL))
3054                        goto dma_map_err;
3055        }
3056
3057        for (i = 0; i < nfrags; i++) {
3058                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3059                int len = skb_frag_size(frag);
3060                bool last_segment = (i == (nfrags - 1));
3061
3062                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3063                WARN_ON(tx_q->tx_skbuff[entry]);
3064
3065                if (likely(priv->extend_desc))
3066                        desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3067                else
3068                        desc = tx_q->dma_tx + entry;
3069
3070                des = skb_frag_dma_map(priv->device, frag, 0, len,
3071                                       DMA_TO_DEVICE);
3072                if (dma_mapping_error(priv->device, des))
3073                        goto dma_map_err; /* should reuse desc w/o issues */
3074
3075                tx_q->tx_skbuff_dma[entry].buf = des;
3076
3077                stmmac_set_desc_addr(priv, desc, des);
3078
3079                tx_q->tx_skbuff_dma[entry].map_as_page = true;
3080                tx_q->tx_skbuff_dma[entry].len = len;
3081                tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3082
3083                /* Prepare the descriptor and set the own bit too */
3084                stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3085                                priv->mode, 1, last_segment, skb->len);
3086        }
3087
3088        /* Only the last descriptor gets to point to the skb. */
3089        tx_q->tx_skbuff[entry] = skb;
3090
3091        /* We've used all descriptors we need for this skb, however,
3092         * advance cur_tx so that it references a fresh descriptor.
3093         * ndo_start_xmit will fill this descriptor the next time it's
3094         * called and stmmac_tx_clean may clean up to this descriptor.
3095         */
3096        entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3097        tx_q->cur_tx = entry;
3098
3099        if (netif_msg_pktdata(priv)) {
3100                void *tx_head;
3101
3102                netdev_dbg(priv->dev,
3103                           "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3104                           __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3105                           entry, first, nfrags);
3106
3107                if (priv->extend_desc)
3108                        tx_head = (void *)tx_q->dma_etx;
3109                else
3110                        tx_head = (void *)tx_q->dma_tx;
3111
3112                stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3113
3114                netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3115                print_pkt(skb->data, skb->len);
3116        }
3117
3118        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3119                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3120                          __func__);
3121                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3122        }
3123
3124        dev->stats.tx_bytes += skb->len;
3125
3126        /* According to the coalesce parameter, the IC bit for the latest
3127         * segment is reset and the timer is restarted to clean the tx status.
3128         * This approach takes care of the fragments: desc is the first
3129         * element in the case of no SG.
3130         */
3131        priv->tx_count_frames += nfrags + 1;
3132        if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
3133            !priv->tx_timer_armed) {
3134                mod_timer(&priv->txtimer,
3135                          STMMAC_COAL_TIMER(priv->tx_coal_timer));
3136                priv->tx_timer_armed = true;
3137        } else {
3138                priv->tx_count_frames = 0;
3139                stmmac_set_tx_ic(priv, desc);
3140                priv->xstats.tx_set_ic_bit++;
3141                priv->tx_timer_armed = false;
3142        }
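        /* Same mitigation scheme as in the TSO path above, except that the
         * tx_timer_armed flag prevents mod_timer() from being called again
         * while the coalescing timer is already pending; in that case the
         * else branch sets the IC bit instead.
         */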
3143
3144        skb_tx_timestamp(skb);
3145
3146        /* Ready to fill the first descriptor and set the OWN bit w/o any
3147         * problems because all the descriptors are actually ready to be
3148         * passed to the DMA engine.
3149         */
3150        if (likely(!is_jumbo)) {
3151                bool last_segment = (nfrags == 0);
3152
3153                des = dma_map_single(priv->device, skb->data,
3154                                     nopaged_len, DMA_TO_DEVICE);
3155                if (dma_mapping_error(priv->device, des))
3156                        goto dma_map_err;
3157
3158                tx_q->tx_skbuff_dma[first_entry].buf = des;
3159
3160                stmmac_set_desc_addr(priv, first, des);
3161
3162                tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3163                tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3164
3165                if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3166                             priv->hwts_tx_en)) {
3167                        /* declare that device is doing timestamping */
3168                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3169                        stmmac_enable_tx_timestamp(priv, first);
3170                }
3171
3172                /* Prepare the first descriptor setting the OWN bit too */
3173                stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3174                                csum_insertion, priv->mode, 1, last_segment,
3175                                skb->len);
3176
3177                /* The own bit must be the latest setting done when preparing the
3178                 * descriptor, and then a barrier is needed to make sure that
3179                 * all is coherent before granting the DMA engine.
3180                 */
3181                wmb();
3182        }
3183
3184        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3185
3186        stmmac_enable_dma_transmission(priv, priv->ioaddr);
3187        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3188
3189        return NETDEV_TX_OK;
3190
3191dma_map_err:
3192        netdev_err(priv->dev, "Tx DMA map failed\n");
3193        dev_kfree_skb(skb);
3194        priv->dev->stats.tx_dropped++;
3195        return NETDEV_TX_OK;
3196}
3197
3198static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3199{
3200        struct vlan_ethhdr *veth;
3201        __be16 vlan_proto;
3202        u16 vlanid;
3203
3204        veth = (struct vlan_ethhdr *)skb->data;
3205        vlan_proto = veth->h_vlan_proto;
3206
3207        if ((vlan_proto == htons(ETH_P_8021Q) &&
3208             dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3209            (vlan_proto == htons(ETH_P_8021AD) &&
3210             dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3211                /* pop the vlan tag */
3212                vlanid = ntohs(veth->h_vlan_TCI);
3213                memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3214                skb_pull(skb, VLAN_HLEN);
3215                __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3216        }
3217}
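/* The tag is popped by sliding the two MAC addresses forward over the 4-byte
 * VLAN header (memmove) and then pulling VLAN_HLEN bytes, so the frame seen by
 * the stack is "dst MAC | src MAC | type | payload" again, while the extracted
 * TCI is handed over via __vlan_hwaccel_put_tag().
 */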
3218
3219
3220static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3221{
3222        if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3223                return 0;
3224
3225        return 1;
3226}
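/* rx_zeroc_thresh acts as a zero-copy back-off counter: it is forced up to
 * STMMAC_RX_THRESH when an skb allocation fails in stmmac_rx_refill(),
 * incremented whenever a preallocated buffer is handed to the stack and
 * decremented as buffers are successfully refilled.  While it is at or above
 * STMMAC_RX_THRESH, stmmac_rx() copies frames instead of consuming the
 * preallocated buffers.
 */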
3227
3228/**
3229 * stmmac_rx_refill - refill used skb preallocated buffers
3230 * @priv: driver private structure
3231 * @queue: RX queue index
3232 * Description : this is to reallocate the skb for the reception process
3233 * that is based on zero-copy.
3234 */
3235static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3236{
3237        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3238        int dirty = stmmac_rx_dirty(priv, queue);
3239        unsigned int entry = rx_q->dirty_rx;
3240
3241        int bfsize = priv->dma_buf_sz;
3242
3243        while (dirty-- > 0) {
3244                struct dma_desc *p;
3245
3246                if (priv->extend_desc)
3247                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
3248                else
3249                        p = rx_q->dma_rx + entry;
3250
3251                if (likely(!rx_q->rx_skbuff[entry])) {
3252                        struct sk_buff *skb;
3253
3254                        skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3255                        if (unlikely(!skb)) {
3256                                /* so for a while no zero-copy! */
3257                                rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3258                                if (unlikely(net_ratelimit()))
3259                                        dev_err(priv->device,
3260                                                "fail to alloc skb entry %d\n",
3261                                                entry);
3262                                break;
3263                        }
3264
3265                        rx_q->rx_skbuff[entry] = skb;
3266                        rx_q->rx_skbuff_dma[entry] =
3267                            dma_map_single(priv->device, skb->data, bfsize,
3268                                           DMA_FROM_DEVICE);
3269                        if (dma_mapping_error(priv->device,
3270                                              rx_q->rx_skbuff_dma[entry])) {
3271                                netdev_err(priv->dev, "Rx DMA map failed\n");
3272                                dev_kfree_skb(skb);
3273                                break;
3274                        }
3275
3276                        stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3277                        stmmac_refill_desc3(priv, rx_q, p);
3278
3279                        if (rx_q->rx_zeroc_thresh > 0)
3280                                rx_q->rx_zeroc_thresh--;
3281
3282                        netif_dbg(priv, rx_status, priv->dev,
3283                                  "refill entry #%d\n", entry);
3284                }
3285                dma_wmb();
3286
3287                stmmac_set_rx_owner(priv, p, priv->use_riwt);
3288
3289                dma_wmb();
3290
3291                entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3292        }
3293        rx_q->dirty_rx = entry;
3294}
3295
3296/**
3297 * stmmac_rx - manage the receive process
3298 * @priv: driver private structure
3299 * @limit: napi budget
3300 * @queue: RX queue index.
3301 * Description : this is the function called by the napi poll method.
3302 * It gets all the frames inside the ring.
3303 */
3304static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3305{
3306        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3307        unsigned int entry = rx_q->cur_rx;
3308        int coe = priv->hw->rx_csum;
3309        unsigned int next_entry;
3310        unsigned int count = 0;
3311
3312        if (netif_msg_rx_status(priv)) {
3313                void *rx_head;
3314
3315                netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3316                if (priv->extend_desc)
3317                        rx_head = (void *)rx_q->dma_erx;
3318                else
3319                        rx_head = (void *)rx_q->dma_rx;
3320
3321                stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3322        }
3323        while (count < limit) {
3324                int status;
3325                struct dma_desc *p;
3326                struct dma_desc *np;
3327
3328                if (priv->extend_desc)
3329                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
3330                else
3331                        p = rx_q->dma_rx + entry;
3332
3333                /* read the status of the incoming frame */
3334                status = stmmac_rx_status(priv, &priv->dev->stats,
3335                                &priv->xstats, p);
3336                /* check if managed by the DMA otherwise go ahead */
3337                if (unlikely(status & dma_own))
3338                        break;
3339
3340                count++;
3341
3342                rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3343                next_entry = rx_q->cur_rx;
3344
3345                if (priv->extend_desc)
3346                        np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3347                else
3348                        np = rx_q->dma_rx + next_entry;
3349
3350                prefetch(np);
3351
3352                if (priv->extend_desc)
3353                        stmmac_rx_extended_status(priv, &priv->dev->stats,
3354                                        &priv->xstats, rx_q->dma_erx + entry);
3355                if (unlikely(status == discard_frame)) {
3356                        priv->dev->stats.rx_errors++;
3357                        if (priv->hwts_rx_en && !priv->extend_desc) {
3358                                /* DESC2 & DESC3 will be overwritten by the device
3359                                 * with the timestamp value, hence reinitialize
3360                                 * them in the stmmac_rx_refill() function so that
3361                                 * the device can reuse them.
3362                                 */
3363                                dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3364                                rx_q->rx_skbuff[entry] = NULL;
3365                                dma_unmap_single(priv->device,
3366                                                 rx_q->rx_skbuff_dma[entry],
3367                                                 priv->dma_buf_sz,
3368                                                 DMA_FROM_DEVICE);
3369                        }
3370                } else {
3371                        struct sk_buff *skb;
3372                        int frame_len;
3373                        unsigned int des;
3374
3375                        stmmac_get_desc_addr(priv, p, &des);
3376                        frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3377
3378                        /*  If frame length is greater than skb buffer size
3379                         *  (preallocated during init) then the packet is
3380                         *  ignored
3381                         */
3382                        if (frame_len > priv->dma_buf_sz) {
3383                                netdev_err(priv->dev,
3384                                           "len %d larger than size (%d)\n",
3385                                           frame_len, priv->dma_buf_sz);
3386                                priv->dev->stats.rx_length_errors++;
3387                                break;
3388                        }
3389
3390                        /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3391                         * Type frames (LLC/LLC-SNAP)
3392                         *
3393                         * llc_snap is never checked in GMAC >= 4, so this ACS
3394                         * feature is always disabled and packets need to be
3395                         * stripped manually.
3396                         */
3397                        if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3398                            unlikely(status != llc_snap))
3399                                frame_len -= ETH_FCS_LEN;
3400
3401                        if (netif_msg_rx_status(priv)) {
3402                                netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3403                                           p, entry, des);
3404                                netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3405                                           frame_len, status);
3406                        }
3407
3408                        /* Zero-copy is always used for all sizes in the
3409                         * case of GMAC4 because it always needs to refill
3410                         * the used descriptors.
3411                         */
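                        /* On other cores, frames shorter than rx_copybreak (or
                         * received while the zero-copy back-off is active) are
                         * copied into a freshly allocated skb so the DMA buffer
                         * can be reused in place; larger frames take the
                         * zero-copy branch below, which unmaps the preallocated
                         * buffer and leaves its replacement to stmmac_rx_refill().
                         */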
3412                        if (unlikely(!priv->plat->has_gmac4 &&
3413                                     ((frame_len < priv->rx_copybreak) ||
3414                                     stmmac_rx_threshold_count(rx_q)))) {
3415                                skb = netdev_alloc_skb_ip_align(priv->dev,
3416                                                                frame_len);
3417                                if (unlikely(!skb)) {
3418                                        if (net_ratelimit())
3419                                                dev_warn(priv->device,
3420                                                         "packet dropped\n");
3421                                        priv->dev->stats.rx_dropped++;
3422                                        break;
3423                                }
3424
3425                                dma_sync_single_for_cpu(priv->device,
3426                                                        rx_q->rx_skbuff_dma
3427                                                        [entry], frame_len,
3428                                                        DMA_FROM_DEVICE);
3429                                skb_copy_to_linear_data(skb,
3430                                                        rx_q->
3431                                                        rx_skbuff[entry]->data,
3432                                                        frame_len);
3433
3434                                skb_put(skb, frame_len);
3435                                dma_sync_single_for_device(priv->device,
3436                                                           rx_q->rx_skbuff_dma
3437                                                           [entry], frame_len,
3438                                                           DMA_FROM_DEVICE);
3439                        } else {
3440                                skb = rx_q->rx_skbuff[entry];
3441                                if (unlikely(!skb)) {
3442                                        netdev_err(priv->dev,
3443                                                   "%s: Inconsistent Rx chain\n",
3444                                                   priv->dev->name);
3445                                        priv->dev->stats.rx_dropped++;
3446                                        break;
3447                                }
3448                                prefetch(skb->data - NET_IP_ALIGN);
3449                                rx_q->rx_skbuff[entry] = NULL;
3450                                rx_q->rx_zeroc_thresh++;
3451
3452                                skb_put(skb, frame_len);
3453                                dma_unmap_single(priv->device,
3454                                                 rx_q->rx_skbuff_dma[entry],
3455                                                 priv->dma_buf_sz,
3456                                                 DMA_FROM_DEVICE);
3457                        }
3458
3459                        if (netif_msg_pktdata(priv)) {
3460                                netdev_dbg(priv->dev, "frame received (%dbytes)",
3461                                           frame_len);
3462                                print_pkt(skb->data, frame_len);
3463                        }
3464
3465                        stmmac_get_rx_hwtstamp(priv, p, np, skb);
3466
3467                        stmmac_rx_vlan(priv->dev, skb);
3468
3469                        skb->protocol = eth_type_trans(skb, priv->dev);
3470
3471                        if (unlikely(!coe))
3472                                skb_checksum_none_assert(skb);
3473                        else
3474                                skb->ip_summed = CHECKSUM_UNNECESSARY;
3475
3476                        napi_gro_receive(&rx_q->napi, skb);
3477
3478                        priv->dev->stats.rx_packets++;
3479                        priv->dev->stats.rx_bytes += frame_len;
3480                }
3481                entry = next_entry;
3482        }
3483
3484        stmmac_rx_refill(priv, queue);
3485
3486        priv->xstats.rx_pkt_n += count;
3487
3488        return count;
3489}
3490
3491/**
3492 *  stmmac_poll - stmmac poll method (NAPI)
3493 *  @napi : pointer to the napi structure.
3494 *  @budget : maximum number of packets that the current CPU can receive from
3495 *            all interfaces.
3496 *  Description :
3497 *  To look at the incoming frames and clear the tx resources.
3498 */
3499static int stmmac_poll(struct napi_struct *napi, int budget)
3500{
3501        struct stmmac_rx_queue *rx_q =
3502                container_of(napi, struct stmmac_rx_queue, napi);
3503        struct stmmac_priv *priv = rx_q->priv_data;
3504        u32 tx_count = priv->plat->tx_queues_to_use;
3505        u32 chan = rx_q->queue_index;
3506        int work_done = 0;
3507        u32 queue;
3508
3509        priv->xstats.napi_poll++;
3510
3511        /* check all the queues */
3512        for (queue = 0; queue < tx_count; queue++)
3513                stmmac_tx_clean(priv, queue);
3514
3515        work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3516        if (work_done < budget) {
3517                napi_complete_done(napi, work_done);
3518                stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3519        }
3520        return work_done;
3521}
3522
3523/**
3524 *  stmmac_tx_timeout
3525 *  @dev : Pointer to net device structure
3526 *  Description: this function is called when a packet transmission fails to
3527 *   complete within a reasonable time. The driver will mark the error in the
3528 *   netdev structure and arrange for the device to be reset to a sane state
3529 *   in order to transmit a new packet.
3530 */
3531static void stmmac_tx_timeout(struct net_device *dev)
3532{
3533        struct stmmac_priv *priv = netdev_priv(dev);
3534
3535        stmmac_global_err(priv);
3536}
3537
3538/**
3539 *  stmmac_set_rx_mode - entry point for multicast addressing
3540 *  @dev : pointer to the device structure
3541 *  Description:
3542 *  This function is a driver entry point which gets called by the kernel
3543 *  whenever multicast addresses must be enabled/disabled.
3544 *  Return value:
3545 *  void.
3546 */
3547static void stmmac_set_rx_mode(struct net_device *dev)
3548{
3549        struct stmmac_priv *priv = netdev_priv(dev);
3550
3551        stmmac_set_filter(priv, priv->hw, dev);
3552}
3553
3554/**
3555 *  stmmac_change_mtu - entry point to change MTU size for the device.
3556 *  @dev : device pointer.
3557 *  @new_mtu : the new MTU size for the device.
3558 *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3559 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3560 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3561 *  Return value:
3562 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3563 *  file on failure.
3564 */
3565static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3566{
3567        struct stmmac_priv *priv = netdev_priv(dev);
3568
3569        if (netif_running(dev)) {
3570                netdev_err(priv->dev, "must be stopped to change its MTU\n");
3571                return -EBUSY;
3572        }
3573
3574        dev->mtu = new_mtu;
3575
3576        netdev_update_features(dev);
3577
3578        return 0;
3579}
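/* Note that the MTU can only be changed while the interface is down, and
 * netdev_update_features() re-runs the fixups in stmmac_fix_features() below,
 * e.g. dropping TX checksum offload on bugged-jumbo platforms once the new
 * MTU exceeds ETH_DATA_LEN.
 */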
3580
3581static netdev_features_t stmmac_fix_features(struct net_device *dev,
3582                                             netdev_features_t features)
3583{
3584        struct stmmac_priv *priv = netdev_priv(dev);
3585
3586        if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3587                features &= ~NETIF_F_RXCSUM;
3588
3589        if (!priv->plat->tx_coe)
3590                features &= ~NETIF_F_CSUM_MASK;
3591
3592        /* Some GMAC devices have buggy Jumbo frame support that
3593         * needs to have the Tx COE disabled for oversized frames
3594         * (due to limited buffer sizes). In this case we disable
3595         * the TX csum insertion in the TDES and do not use SF.
3596         */
3597        if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3598                features &= ~NETIF_F_CSUM_MASK;
3599
3600        /* Disable tso if asked by ethtool */
3601        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3602                if (features & NETIF_F_TSO)
3603                        priv->tso = true;
3604                else
3605                        priv->tso = false;
3606        }
3607
3608        return features;
3609}
3610
3611static int stmmac_set_features(struct net_device *netdev,
3612                               netdev_features_t features)
3613{
3614        struct stmmac_priv *priv = netdev_priv(netdev);
3615
3616        /* Keep the COE Type in case checksum offload is supported */
3617        if (features & NETIF_F_RXCSUM)
3618                priv->hw->rx_csum = priv->plat->rx_coe;
3619        else
3620                priv->hw->rx_csum = 0;
3621        /* No check needed because rx_coe has been set before and it will be
3622         * fixed in case of an issue.
3623         */
3624        stmmac_rx_ipc(priv, priv->hw);
3625
3626        return 0;
3627}
3628
3629/**
3630 *  stmmac_interrupt - main ISR
3631 *  @irq: interrupt number.
3632 *  @dev_id: to pass the net device pointer.
3633 *  Description: this is the main driver interrupt service routine.
3634 *  It can call:
3635 *  o DMA service routine (to manage incoming frame reception and transmission
3636 *    status)
3637 *  o Core interrupts to manage: remote wake-up, management counter, LPI
3638 *    interrupts.
3639 */
3640static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3641{
3642        struct net_device *dev = (struct net_device *)dev_id;
3643        struct stmmac_priv *priv = netdev_priv(dev);
3644        u32 rx_cnt = priv->plat->rx_queues_to_use;
3645        u32 tx_cnt = priv->plat->tx_queues_to_use;
3646        u32 queues_count;
3647        u32 queue;
3648
3649        queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3650
3651        if (priv->irq_wake)
3652                pm_wakeup_event(priv->device, 0);
3653
3654        if (unlikely(!dev)) {
3655                netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3656                return IRQ_NONE;
3657        }
3658
3659        /* Check if adapter is up */
3660        if (test_bit(STMMAC_DOWN, &priv->state))
3661                return IRQ_HANDLED;
3662        /* Check if a fatal error happened */
3663        if (stmmac_safety_feat_interrupt(priv))
3664                return IRQ_HANDLED;
3665
3666        /* To handle GMAC own interrupts */
3667        if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3668                int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3669                int mtl_status;
3670
3671                if (unlikely(status)) {
3672                        /* For LPI we need to save the tx status */
3673                        if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3674                                priv->tx_path_in_lpi_mode = true;
3675                        if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3676                                priv->tx_path_in_lpi_mode = false;
3677                }
3678
3679                for (queue = 0; queue < queues_count; queue++) {
3680                        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3681
3682                        mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3683                                                                queue);
3684                        if (mtl_status != -EINVAL)
3685                                status |= mtl_status;
3686
3687                        if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3688                                stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3689                                                       rx_q->rx_tail_addr,
3690                                                       queue);
3691                }
3692
3693                /* PCS link status */
3694                if (priv->hw->pcs) {
3695                        if (priv->xstats.pcs_link)
3696                                netif_carrier_on(dev);
3697                        else
3698                                netif_carrier_off(dev);
3699                }
3700        }
3701
3702        /* To handle DMA interrupts */
3703        stmmac_dma_interrupt(priv);
3704
3705        return IRQ_HANDLED;
3706}
3707
3708#ifdef CONFIG_NET_POLL_CONTROLLER
3709/* Polling receive - used by NETCONSOLE and other diagnostic tools
3710 * to allow network I/O with interrupts disabled.
3711 */
3712static void stmmac_poll_controller(struct net_device *dev)
3713{
3714        disable_irq(dev->irq);
3715        stmmac_interrupt(dev->irq, dev);
3716        enable_irq(dev->irq);
3717}
3718#endif
3719
3720/**
3721 *  stmmac_ioctl - Entry point for the Ioctl
3722 *  @dev: Device pointer.
3723 *  @rq: An IOCTL specific structure that can contain a pointer to
3724 *  a proprietary structure used to pass information to the driver.
3725 *  @cmd: IOCTL command
3726 *  Description:
3727 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3728 */
3729static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3730{
3731        int ret = -EOPNOTSUPP;
3732
3733        if (!netif_running(dev))
3734                return -EINVAL;
3735
3736        switch (cmd) {
3737        case SIOCGMIIPHY:
3738        case SIOCGMIIREG:
3739        case SIOCSMIIREG:
3740                if (!dev->phydev)
3741                        return -EINVAL;
3742                ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3743                break;
3744        case SIOCSHWTSTAMP:
3745                ret = stmmac_hwtstamp_ioctl(dev, rq);
3746                break;
3747        default:
3748                break;
3749        }
3750
3751        return ret;
3752}
3753
3754static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3755                                    void *cb_priv)
3756{
3757        struct stmmac_priv *priv = cb_priv;
3758        int ret = -EOPNOTSUPP;
3759
3760        stmmac_disable_all_queues(priv);
3761
3762        switch (type) {
3763        case TC_SETUP_CLSU32:
3764                if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3765                        ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3766                break;
3767        default:
3768                break;
3769        }
3770
3771        stmmac_enable_all_queues(priv);
3772        return ret;
3773}
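/* The block callback quiesces all queues around the offload setup and
 * re-enables them afterwards; only chain-0 CLSU32 rules that the core deems
 * offloadable are passed down to stmmac_tc_setup_cls_u32().
 */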
3774
3775static LIST_HEAD(stmmac_block_cb_list);
3776
3777static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3778                           void *type_data)
3779{
3780        struct stmmac_priv *priv = netdev_priv(ndev);
3781
3782        switch (type) {
3783        case TC_SETUP_BLOCK:
3784                return flow_block_cb_setup_simple(type_data,
3785                                                  &stmmac_block_cb_list,
3786                                                  stmmac_setup_tc_block_cb,
3787                                                  priv, priv, true);
3788        default:
3789                return -EOPNOTSUPP;
3790        }
3791}
3792
3793static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3794{
3795        struct stmmac_priv *priv = netdev_priv(ndev);
3796        int ret = 0;
3797
3798        ret = eth_mac_addr(ndev, addr);
3799        if (ret)
3800                return ret;
3801
3802        stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3803
3804        return ret;
3805}
3806
3807#ifdef CONFIG_DEBUG_FS
3808static struct dentry *stmmac_fs_dir;
3809
3810static void sysfs_display_ring(void *head, int size, int extend_desc,
3811                               struct seq_file *seq)
3812{
3813        int i;
3814        struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3815        struct dma_desc *p = (struct dma_desc *)head;
3816
3817        for (i = 0; i < size; i++) {
3818                if (extend_desc) {
3819                        seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3820                                   i, (unsigned int)virt_to_phys(ep),
3821                                   le32_to_cpu(ep->basic.des0),
3822                                   le32_to_cpu(ep->basic.des1),
3823                                   le32_to_cpu(ep->basic.des2),
3824                                   le32_to_cpu(ep->basic.des3));
3825                        ep++;
3826                } else {
3827                        seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3828                                   i, (unsigned int)virt_to_phys(p),
3829                                   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3830                                   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3831                        p++;
3832                }
3833                seq_printf(seq, "\n");
3834        }
3835}
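/* Each ring entry is printed as one line of the form (illustrative values):
 *
 *   3 [0x2fc45030]: 0x12345678 0x0 0x3000 0x81000000
 *
 * i.e. the descriptor index, its physical address and the four descriptor
 * words des0..des3, whether basic or extended descriptors are in use.
 */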
3836
3837static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3838{
3839        struct net_device *dev = seq->private;
3840        struct stmmac_priv *priv = netdev_priv(dev);
3841        u32 rx_count = priv->plat->rx_queues_to_use;
3842        u32 tx_count = priv->plat->tx_queues_to_use;
3843        u32 queue;
3844
3845        for (queue = 0; queue < rx_count; queue++) {
3846                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3847
3848                seq_printf(seq, "RX Queue %d:\n", queue);
3849
3850                if (priv->extend_desc) {
3851                        seq_printf(seq, "Extended descriptor ring:\n");
3852                        sysfs_display_ring((void *)rx_q->dma_erx,
3853                                           DMA_RX_SIZE, 1, seq);
3854                } else {
3855                        seq_printf(seq, "Descriptor ring:\n");
3856                        sysfs_display_ring((void *)rx_q->dma_rx,
3857                                           DMA_RX_SIZE, 0, seq);
3858                }
3859        }
3860
3861        for (queue = 0; queue < tx_count; queue++) {
3862                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3863
3864                seq_printf(seq, "TX Queue %d:\n", queue);
3865
3866                if (priv->extend_desc) {
3867                        seq_printf(seq, "Extended descriptor ring:\n");
3868                        sysfs_display_ring((void *)tx_q->dma_etx,
3869                                           DMA_TX_SIZE, 1, seq);
3870                } else {
3871                        seq_printf(seq, "Descriptor ring:\n");
3872                        sysfs_display_ring((void *)tx_q->dma_tx,
3873                                           DMA_TX_SIZE, 0, seq);
3874                }
3875        }
3876
3877        return 0;
3878}
3879
3880static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3881{
3882        return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3883}
3884
3885/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3886
3887static const struct file_operations stmmac_rings_status_fops = {
3888        .owner = THIS_MODULE,
3889        .open = stmmac_sysfs_ring_open,
3890        .read = seq_read,
3891        .llseek = seq_lseek,
3892        .release = single_release,
3893};
3894
3895static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3896{
3897        struct net_device *dev = seq->private;
3898        struct stmmac_priv *priv = netdev_priv(dev);
3899
3900        if (!priv->hw_cap_support) {
3901                seq_printf(seq, "DMA HW features not supported\n");
3902                return 0;
3903        }
3904
3905        seq_printf(seq, "==============================\n");
3906        seq_printf(seq, "\tDMA HW features\n");
3907        seq_printf(seq, "==============================\n");
3908
3909        seq_printf(seq, "\t10/100 Mbps: %s\n",
3910                   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3911        seq_printf(seq, "\t1000 Mbps: %s\n",
3912                   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3913        seq_printf(seq, "\tHalf duplex: %s\n",
3914                   (priv->dma_cap.half_duplex) ? "Y" : "N");
3915        seq_printf(seq, "\tHash Filter: %s\n",
3916                   (priv->dma_cap.hash_filter) ? "Y" : "N");
3917        seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3918                   (priv->dma_cap.multi_addr) ? "Y" : "N");
3919        seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3920                   (priv->dma_cap.pcs) ? "Y" : "N");
3921        seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3922                   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3923        seq_printf(seq, "\tPMT Remote wake up: %s\n",
3924                   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3925        seq_printf(seq, "\tPMT Magic Frame: %s\n",
3926                   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3927        seq_printf(seq, "\tRMON module: %s\n",
3928                   (priv->dma_cap.rmon) ? "Y" : "N");
3929        seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3930                   (priv->dma_cap.time_stamp) ? "Y" : "N");
3931        seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3932                   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3933        seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3934                   (priv->dma_cap.eee) ? "Y" : "N");
3935        seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3936        seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3937                   (priv->dma_cap.tx_coe) ? "Y" : "N");
3938        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3939                seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3940                           (priv->dma_cap.rx_coe) ? "Y" : "N");
3941        } else {
3942                seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3943                           (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3944                seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3945                           (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3946        }
3947        seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3948                   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3949        seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3950                   priv->dma_cap.number_rx_channel);
3951        seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3952                   priv->dma_cap.number_tx_channel);
3953        seq_printf(seq, "\tEnhanced descriptors: %s\n",
3954                   (priv->dma_cap.enh_desc) ? "Y" : "N");
3955
3956        return 0;
3957}
3958
3959static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3960{
3961        return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3962}
3963
3964static const struct file_operations stmmac_dma_cap_fops = {
3965        .owner = THIS_MODULE,
3966        .open = stmmac_sysfs_dma_cap_open,
3967        .read = seq_read,
3968        .llseek = seq_lseek,
3969        .release = single_release,
3970};
3971
3972static int stmmac_init_fs(struct net_device *dev)
3973{
3974        struct stmmac_priv *priv = netdev_priv(dev);
3975
3976        /* Create per netdev entries */
3977        priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3978
3979        if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3980                netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3981
3982                return -ENOMEM;
3983        }
3984
3985        /* Entry to report DMA RX/TX rings */
3986        priv->dbgfs_rings_status =
3987                debugfs_create_file("descriptors_status", 0444,
3988                                    priv->dbgfs_dir, dev,
3989                                    &stmmac_rings_status_fops);
3990
3991        if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3992                netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3993                debugfs_remove_recursive(priv->dbgfs_dir);
3994
3995                return -ENOMEM;
3996        }
3997
3998        /* Entry to report the DMA HW features */
3999        priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4000                                                  priv->dbgfs_dir,
4001                                                  dev, &stmmac_dma_cap_fops);
4002
4003        if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4004                netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4005                debugfs_remove_recursive(priv->dbgfs_dir);
4006
4007                return -ENOMEM;
4008        }
4009
4010        return 0;
4011}
4012
4013static void stmmac_exit_fs(struct net_device *dev)
4014{
4015        struct stmmac_priv *priv = netdev_priv(dev);
4016
4017        debugfs_remove_recursive(priv->dbgfs_dir);
4018}
4019#endif /* CONFIG_DEBUG_FS */
4020
4021static const struct net_device_ops stmmac_netdev_ops = {
4022        .ndo_open = stmmac_open,
4023        .ndo_start_xmit = stmmac_xmit,
4024        .ndo_stop = stmmac_release,
4025        .ndo_change_mtu = stmmac_change_mtu,
4026        .ndo_fix_features = stmmac_fix_features,
4027        .ndo_set_features = stmmac_set_features,
4028        .ndo_set_rx_mode = stmmac_set_rx_mode,
4029        .ndo_tx_timeout = stmmac_tx_timeout,
4030        .ndo_do_ioctl = stmmac_ioctl,
4031        .ndo_setup_tc = stmmac_setup_tc,
4032#ifdef CONFIG_NET_POLL_CONTROLLER
4033        .ndo_poll_controller = stmmac_poll_controller,
4034#endif
4035        .ndo_set_mac_address = stmmac_set_mac_address,
4036};
4037
4038static void stmmac_reset_subtask(struct stmmac_priv *priv)
4039{
4040        if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4041                return;
4042        if (test_bit(STMMAC_DOWN, &priv->state))
4043                return;
4044
4045        netdev_err(priv->dev, "Reset adapter.\n");
4046
4047        rtnl_lock();
4048        netif_trans_update(priv->dev);
4049        while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4050                usleep_range(1000, 2000);
4051
4052        set_bit(STMMAC_DOWN, &priv->state);
4053        dev_close(priv->dev);
4054        dev_open(priv->dev, NULL);
4055        clear_bit(STMMAC_DOWN, &priv->state);
4056        clear_bit(STMMAC_RESETING, &priv->state);
4057        rtnl_unlock();
4058}
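/* The reset path above recovers from a fatal error flagged via
 * STMMAC_RESET_REQUESTED: under the rtnl lock it marks the device as down and
 * bounces the interface with dev_close()/dev_open(), so the hardware is fully
 * reinitialised through the normal ndo_stop/ndo_open handlers.
 */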
4059
4060static void stmmac_service_task(struct work_struct *work)
4061{
4062        struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4063                        service_task);
4064
4065        stmmac_reset_subtask(priv);
4066        clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4067}
4068
4069/**
4070 *  stmmac_hw_init - Init the MAC device
4071 *  @priv: driver private structure
4072 *  Description: this function is to configure the MAC device according to
4073 *  some platform parameters or the HW capability register. It prepares the
4074 *  driver to use either ring or chain modes and to setup either enhanced or
4075 *  normal descriptors.
4076 */
4077static int stmmac_hw_init(struct stmmac_priv *priv)
4078{
4079        int ret;
4080
4081        /* dwmac-sun8i only works in chain mode */
4082        if (priv->plat->has_sun8i)
4083                chain_mode = 1;
4084        priv->chain_mode = chain_mode;
4085
4086        /* Initialize HW Interface */
4087        ret = stmmac_hwif_init(priv);
4088        if (ret)
4089                return ret;
4090
4091        /* Get the HW capability (available on GMAC cores newer than 3.50a) */
4092        priv->hw_cap_support = stmmac_get_hw_features(priv);
4093        if (priv->hw_cap_support) {
4094                dev_info(priv->device, "DMA HW capability register supported\n");
4095
4096                /* We can override some gmac/dma configuration fields (e.g.
4097                 * enh_desc, tx_coe) that are passed through the platform
4098                 * with the values from the HW capability register
4099                 * (if supported).
4100                 */
4101                priv->plat->enh_desc = priv->dma_cap.enh_desc;
4102                priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4103                priv->hw->pmt = priv->plat->pmt;
4104
4105                /* TXCOE doesn't work in thresh DMA mode */
4106                if (priv->plat->force_thresh_dma_mode)
4107                        priv->plat->tx_coe = 0;
4108                else
4109                        priv->plat->tx_coe = priv->dma_cap.tx_coe;
4110
4111                /* In case of GMAC4 rx_coe is from HW cap register. */
4112                priv->plat->rx_coe = priv->dma_cap.rx_coe;
4113
4114                if (priv->dma_cap.rx_coe_type2)
4115                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4116                else if (priv->dma_cap.rx_coe_type1)
4117                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4118
4119        } else {
4120                dev_info(priv->device, "No HW DMA feature register supported\n");
4121        }
4122
4123        if (priv->plat->rx_coe) {
4124                priv->hw->rx_csum = priv->plat->rx_coe;
4125                dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4126                if (priv->synopsys_id < DWMAC_CORE_4_00)
4127                        dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4128        }
4129        if (priv->plat->tx_coe)
4130                dev_info(priv->device, "TX Checksum insertion supported\n");
4131
4132        if (priv->plat->pmt) {
4133                dev_info(priv->device, "Wake-Up On Lan supported\n");
4134                device_set_wakeup_capable(priv->device, 1);
4135        }
4136
4137        if (priv->dma_cap.tsoen)
4138                dev_info(priv->device, "TSO supported\n");
4139
4140        /* Run HW quirks, if any */
4141        if (priv->hwif_quirks) {
4142                ret = priv->hwif_quirks(priv);
4143                if (ret)
4144                        return ret;
4145        }
4146
4147        return 0;
4148}
4149
4150/**
4151 * stmmac_dvr_probe
4152 * @device: device pointer
4153 * @plat_dat: platform data pointer
4154 * @res: stmmac resource pointer
4155 * Description: this is the main probe function used to
4156 * call alloc_etherdev and allocate the priv structure.
4157 * Return:
4158 * returns 0 on success, otherwise errno.
4159 */
4160int stmmac_dvr_probe(struct device *device,
4161                     struct plat_stmmacenet_data *plat_dat,
4162                     struct stmmac_resources *res)
4163{
4164        struct net_device *ndev = NULL;
4165        struct stmmac_priv *priv;
4166        int ret = 0;
4167        u32 queue;
4168
4169        ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4170                                  MTL_MAX_TX_QUEUES,
4171                                  MTL_MAX_RX_QUEUES);
4172        if (!ndev)
4173                return -ENOMEM;
4174
4175        SET_NETDEV_DEV(ndev, device);
4176
4177        priv = netdev_priv(ndev);
4178        priv->device = device;
4179        priv->dev = ndev;
4180
4181        stmmac_set_ethtool_ops(ndev);
4182        priv->pause = pause;
4183        priv->plat = plat_dat;
4184        priv->ioaddr = res->addr;
4185        priv->dev->base_addr = (unsigned long)res->addr;
4186
4187        priv->dev->irq = res->irq;
4188        priv->wol_irq = res->wol_irq;
4189        priv->lpi_irq = res->lpi_irq;
4190
4191        if (res->mac)
4192                memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4193
4194        dev_set_drvdata(device, priv->dev);
4195
4196        /* Verify driver arguments */
4197        stmmac_verify_args();
4198
4199        /* Allocate workqueue */
4200        priv->wq = create_singlethread_workqueue("stmmac_wq");
4201        if (!priv->wq) {
4202                dev_err(priv->device, "failed to create workqueue\n");
                ret = -ENOMEM;
4203                goto error_wq;
4204        }
4205
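            /* Deferred work (e.g. the controller restart triggered by a TX
             * timeout) runs from this service task on the workqueue above.
             */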
4206        INIT_WORK(&priv->service_task, stmmac_service_task);
4207
4208        /* Override with kernel parameters if supplied XXX CRS XXX
4209         * this needs to have multiple instances
4210         */
4211        if ((phyaddr >= 0) && (phyaddr <= 31))
4212                priv->plat->phy_addr = phyaddr;
4213
4214        if (priv->plat->stmmac_rst) {
4215                ret = reset_control_assert(priv->plat->stmmac_rst);
4216                reset_control_deassert(priv->plat->stmmac_rst);
4217                /* Some reset controllers have only reset callback instead of
4218                 * assert + deassert callbacks pair.
4219                 */
4220                if (ret == -ENOTSUPP)
4221                        reset_control_reset(priv->plat->stmmac_rst);
4222        }
4223
4224        /* Init MAC and get the capabilities */
4225        ret = stmmac_hw_init(priv);
4226        if (ret)
4227                goto error_hw_init;
4228
4229        /* Configure real RX and TX queues */
4230        netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4231        netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4232
4233        ndev->netdev_ops = &stmmac_netdev_ops;
4234
4235        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4236                            NETIF_F_RXCSUM;
4237
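            /* Advertise hardware tc offload only if the tc backend
             * initialises successfully.
             */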
4238        ret = stmmac_tc_init(priv, priv);
4239        if (!ret) {
4240                ndev->hw_features |= NETIF_F_HW_TC;
4241        }
4242
4243        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4244                ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4245                priv->tso = true;
4246                dev_info(priv->device, "TSO feature enabled\n");
4247        }
4248        ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
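            /* The netdev TX watchdog calls ndo_tx_timeout if a TX queue is
             * stuck for longer than this timeout.
             */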
4249        ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4250#ifdef STMMAC_VLAN_TAG_USED
4251        /* Both mac100 and gmac support receive VLAN tag detection */
4252        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
4253#endif
4254        priv->msg_enable = netif_msg_init(debug, default_msg_level);
4255
4256        /* MTU range: 46 - hw-specific max */
4257        ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4258        if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4259                ndev->max_mtu = JUMBO_LEN;
4260        else
4261                ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4262        /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4263         * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4264         */
4265        if ((priv->plat->maxmtu < ndev->max_mtu) &&
4266            (priv->plat->maxmtu >= ndev->min_mtu))
4267                ndev->max_mtu = priv->plat->maxmtu;
4268        else if (priv->plat->maxmtu < ndev->min_mtu)
4269                dev_warn(priv->device,
4270                         "%s: warning: maxmtu has an invalid value (%d)\n",
4271                         __func__, priv->plat->maxmtu);
4272
4273        if (flow_ctrl)
4274                priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4275
4276        /* The RX watchdog is available in cores newer than 3.40.
4277         * In some cases, for example on buggy HW, this feature
4278         * has to be disabled; this can be done by setting the
4279         * riwt_off field in the platform data.
4280         */
4281        if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4282                priv->use_riwt = 1;
4283                dev_info(priv->device,
4284                         "Enable RX Mitigation via HW Watchdog Timer\n");
4285        }
4286
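            /* One NAPI instance is registered per RX queue; the polling
             * weight scales with the number of RX queues in use.
             */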
4287        for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4288                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4289
4290                netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4291                               (8 * priv->plat->rx_queues_to_use));
4292        }
4293
4294        mutex_init(&priv->lock);
4295
4296        /* If a specific clk_csr value is passed from the platform,
4297         * the CSR Clock Range selection is fixed and cannot be
4298         * changed at run-time. Otherwise the driver tries to set
4299         * the MDC clock dynamically according to the actual CSR
4300         * input clock.
4301         */
4302        if (!priv->plat->clk_csr)
4303                stmmac_clk_csr_set(priv);
4304        else
4305                priv->clk_csr = priv->plat->clk_csr;
4306
4307        stmmac_check_pcs_mode(priv);
4308
4309        if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4310            priv->hw->pcs != STMMAC_PCS_TBI &&
4311            priv->hw->pcs != STMMAC_PCS_RTBI) {
4312                /* MDIO bus Registration */
4313                ret = stmmac_mdio_register(ndev);
4314                if (ret < 0) {
4315                        dev_err(priv->device,
4316                                "%s: MDIO bus (id: %d) registration failed\n",
4317                                __func__, priv->plat->bus_id);
4318                        goto error_mdio_register;
4319                }
4320        }
4321
4322        ret = register_netdev(ndev);
4323        if (ret) {
4324                dev_err(priv->device, "%s: ERROR %i registering the device\n",
4325                        __func__, ret);
4326                goto error_netdev_register;
4327        }
4328
4329        return ret;
4330
4331error_netdev_register:
4332        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4333            priv->hw->pcs != STMMAC_PCS_TBI &&
4334            priv->hw->pcs != STMMAC_PCS_RTBI)
4335                stmmac_mdio_unregister(ndev);
4336error_mdio_register:
4337        for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4338                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4339
4340                netif_napi_del(&rx_q->napi);
4341        }
4342error_hw_init:
4343        destroy_workqueue(priv->wq);
4344error_wq:
4345        free_netdev(ndev);
4346
4347        return ret;
4348}
4349EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4350
4351/**
4352 * stmmac_dvr_remove
4353 * @dev: device pointer
4354 * Description: this function resets the TX/RX processes, disables the MAC
4355 * RX/TX, changes the link status and releases the DMA descriptor rings.
4356 */
4357int stmmac_dvr_remove(struct device *dev)
4358{
4359        struct net_device *ndev = dev_get_drvdata(dev);
4360        struct stmmac_priv *priv = netdev_priv(ndev);
4361
4362        netdev_info(priv->dev, "%s: removing driver\n", __func__);
4363
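            /* Quiesce the hardware before tearing down the netdev: stop all
             * DMA channels and disable the MAC RX/TX path.
             */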
4364        stmmac_stop_all_dma(priv);
4365
4366        stmmac_mac_set(priv, priv->ioaddr, false);
4367        netif_carrier_off(ndev);
4368        unregister_netdev(ndev);
4369        if (priv->plat->stmmac_rst)
4370                reset_control_assert(priv->plat->stmmac_rst);
4371        clk_disable_unprepare(priv->plat->pclk);
4372        clk_disable_unprepare(priv->plat->stmmac_clk);
4373        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4374            priv->hw->pcs != STMMAC_PCS_TBI &&
4375            priv->hw->pcs != STMMAC_PCS_RTBI)
4376                stmmac_mdio_unregister(ndev);
4377        destroy_workqueue(priv->wq);
4378        mutex_destroy(&priv->lock);
4379        free_netdev(ndev);
4380
4381        return 0;
4382}
4383EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4384
4385/**
4386 * stmmac_suspend - suspend callback
4387 * @dev: device pointer
4388 * Description: this function suspends the device; it is called by the
4389 * platform driver to stop the network queues, program the PMT register
4390 * (for WoL) and release the driver resources.
4391 */
4392int stmmac_suspend(struct device *dev)
4393{
4394        struct net_device *ndev = dev_get_drvdata(dev);
4395        struct stmmac_priv *priv = netdev_priv(ndev);
4396
4397        if (!ndev || !netif_running(ndev))
4398                return 0;
4399
4400        if (ndev->phydev)
4401                phy_stop(ndev->phydev);
4402
4403        mutex_lock(&priv->lock);
4404
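            /* Detach the interface and quiesce all TX queues and NAPI
             * instances before the DMA engines are stopped.
             */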
4405        netif_device_detach(ndev);
4406        stmmac_stop_all_queues(priv);
4407
4408        stmmac_disable_all_queues(priv);
4409
4410        /* Stop TX/RX DMA */
4411        stmmac_stop_all_dma(priv);
4412
4413        /* Enable Power down mode by programming the PMT regs */
4414        if (device_may_wakeup(priv->device)) {
4415                stmmac_pmt(priv, priv->hw, priv->wolopts);
4416                priv->irq_wake = 1;
4417        } else {
4418                stmmac_mac_set(priv, priv->ioaddr, false);
4419                pinctrl_pm_select_sleep_state(priv->device);
4420                /* Disable the clocks since PMT (Wake-on-LAN) is not used */
4421                clk_disable(priv->plat->pclk);
4422                clk_disable(priv->plat->stmmac_clk);
4423        }
4424        mutex_unlock(&priv->lock);
4425
4426        priv->oldlink = false;
4427        priv->speed = SPEED_UNKNOWN;
4428        priv->oldduplex = DUPLEX_UNKNOWN;
4429        return 0;
4430}
4431EXPORT_SYMBOL_GPL(stmmac_suspend);
4432
4433/**
4434 * stmmac_reset_queues_param - reset queue parameters
4435 * @priv: driver private structure
4436 */
4437static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4438{
4439        u32 rx_cnt = priv->plat->rx_queues_to_use;
4440        u32 tx_cnt = priv->plat->tx_queues_to_use;
4441        u32 queue;
4442
4443        for (queue = 0; queue < rx_cnt; queue++) {
4444                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4445
4446                rx_q->cur_rx = 0;
4447                rx_q->dirty_rx = 0;
4448        }
4449
4450        for (queue = 0; queue < tx_cnt; queue++) {
4451                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4452
4453                tx_q->cur_tx = 0;
4454                tx_q->dirty_tx = 0;
4455                tx_q->mss = 0;
4456        }
4457}
4458
4459/**
4460 * stmmac_resume - resume callback
4461 * @dev: device pointer
4462 * Description: invoked on resume to bring the DMA engine and the core back
4463 * into a usable state.
4464 */
4465int stmmac_resume(struct device *dev)
4466{
4467        struct net_device *ndev = dev_get_drvdata(dev);
4468        struct stmmac_priv *priv = netdev_priv(ndev);
4469
4470        if (!netif_running(ndev))
4471                return 0;
4472
4473        /* The Power Down bit in the PMT register is cleared
4474         * automatically as soon as a magic packet or a Wake-up frame
4475         * is received. It is still better to clear it manually, since
4476         * it can cause problems when the resume is triggered by
4477         * another device (e.g. a serial console).
4478         */
4479        if (device_may_wakeup(priv->device)) {
4480                mutex_lock(&priv->lock);
4481                stmmac_pmt(priv, priv->hw, 0);
4482                mutex_unlock(&priv->lock);
4483                priv->irq_wake = 0;
4484        } else {
4485                pinctrl_pm_select_default_state(priv->device);
4486                /* Enable the clocks that were disabled on suspend */
4487                clk_enable(priv->plat->stmmac_clk);
4488                clk_enable(priv->plat->pclk);
4489                /* reset the phy so that it's ready */
4490                if (priv->mii)
4491                        stmmac_mdio_reset(priv->mii);
4492        }
4493
4494        netif_device_attach(ndev);
4495
4496        mutex_lock(&priv->lock);
4497
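            /* Reset the ring indices and the cached TSO MSS and re-init the
             * descriptors before the DMA and the core are reprogrammed below.
             */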
4498        stmmac_reset_queues_param(priv);
4499
4500        stmmac_clear_descriptors(priv);
4501
4502        stmmac_hw_setup(ndev, false);
4503        stmmac_init_tx_coalesce(priv);
4504        stmmac_set_rx_mode(ndev);
4505
4506        stmmac_enable_all_queues(priv);
4507
4508        stmmac_start_all_queues(priv);
4509
4510        mutex_unlock(&priv->lock);
4511
4512        if (ndev->phydev)
4513                phy_start(ndev->phydev);
4514
4515        return 0;
4516}
4517EXPORT_SYMBOL_GPL(stmmac_resume);
4518
4519#ifndef MODULE
4520static int __init stmmac_cmdline_opt(char *str)
4521{
4522        char *opt;
4523
4524        if (!str || !*str)
4525                return -EINVAL;
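            /* Parse a comma-separated list of "name:value" options passed
             * via the stmmaceth= boot parameter.
             */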
4526        while ((opt = strsep(&str, ",")) != NULL) {
4527                if (!strncmp(opt, "debug:", 6)) {
4528                        if (kstrtoint(opt + 6, 0, &debug))
4529                                goto err;
4530                } else if (!strncmp(opt, "phyaddr:", 8)) {
4531                        if (kstrtoint(opt + 8, 0, &phyaddr))
4532                                goto err;
4533                } else if (!strncmp(opt, "buf_sz:", 7)) {
4534                        if (kstrtoint(opt + 7, 0, &buf_sz))
4535                                goto err;
4536                } else if (!strncmp(opt, "tc:", 3)) {
4537                        if (kstrtoint(opt + 3, 0, &tc))
4538                                goto err;
4539                } else if (!strncmp(opt, "watchdog:", 9)) {
4540                        if (kstrtoint(opt + 9, 0, &watchdog))
4541                                goto err;
4542                } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4543                        if (kstrtoint(opt + 10, 0, &flow_ctrl))
4544                                goto err;
4545                } else if (!strncmp(opt, "pause:", 6)) {
4546                        if (kstrtoint(opt + 6, 0, &pause))
4547                                goto err;
4548                } else if (!strncmp(opt, "eee_timer:", 10)) {
4549                        if (kstrtoint(opt + 10, 0, &eee_timer))
4550                                goto err;
4551                } else if (!strncmp(opt, "chain_mode:", 11)) {
4552                        if (kstrtoint(opt + 11, 0, &chain_mode))
4553                                goto err;
4554                }
4555        }
4556        return 0;
4557
4558err:
4559        pr_err("%s: ERROR broken module parameter conversion\n", __func__);
4560        return -EINVAL;
4561}
4562
4563__setup("stmmaceth=", stmmac_cmdline_opt);
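    /* Usage example (driver built-in, not a module): booting with
     * stmmaceth=debug:16,watchdog:10000 overrides the corresponding
     * driver parameters handled above.
     */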
4564#endif /* MODULE */
4565
4566static int __init stmmac_init(void)
4567{
4568#ifdef CONFIG_DEBUG_FS
4569        /* Create debugfs main directory if it doesn't exist yet */
4570        if (!stmmac_fs_dir) {
4571                stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4572
4573                if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4574                        pr_err("ERROR %s, debugfs create directory failed\n",
4575                               STMMAC_RESOURCE_NAME);
4576
4577                        return -ENOMEM;
4578                }
4579        }
4580#endif
4581
4582        return 0;
4583}
4584
4585static void __exit stmmac_exit(void)
4586{
4587#ifdef CONFIG_DEBUG_FS
4588        debugfs_remove_recursive(stmmac_fs_dir);
4589#endif
4590}
4591
4592module_init(stmmac_init)
4593module_exit(stmmac_exit)
4594
4595MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4596MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4597MODULE_LICENSE("GPL");
4598