linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
   1/*******************************************************************************
   2  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
   3  ST Ethernet IPs are built around a Synopsys IP Core.
   4
   5        Copyright(C) 2007-2011 STMicroelectronics Ltd
   6
   7  This program is free software; you can redistribute it and/or modify it
   8  under the terms and conditions of the GNU General Public License,
   9  version 2, as published by the Free Software Foundation.
  10
  11  This program is distributed in the hope it will be useful, but WITHOUT
  12  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14  more details.
  15
  16  The full GNU General Public License is included in this distribution in
  17  the file called "COPYING".
  18
  19  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
  20
  21  Documentation available at:
  22        http://www.stlinux.com
  23  Support available at:
  24        https://bugzilla.stlinux.com/
  25*******************************************************************************/
  26
  27#include <linux/clk.h>
  28#include <linux/kernel.h>
  29#include <linux/interrupt.h>
  30#include <linux/ip.h>
  31#include <linux/tcp.h>
  32#include <linux/skbuff.h>
  33#include <linux/ethtool.h>
  34#include <linux/if_ether.h>
  35#include <linux/crc32.h>
  36#include <linux/mii.h>
  37#include <linux/if.h>
  38#include <linux/if_vlan.h>
  39#include <linux/dma-mapping.h>
  40#include <linux/slab.h>
  41#include <linux/prefetch.h>
  42#include <linux/pinctrl/consumer.h>
  43#ifdef CONFIG_DEBUG_FS
  44#include <linux/debugfs.h>
  45#include <linux/seq_file.h>
  46#endif /* CONFIG_DEBUG_FS */
  47#include <linux/net_tstamp.h>
  48#include "stmmac_ptp.h"
  49#include "stmmac.h"
  50#include <linux/reset.h>
  51#include <linux/of_mdio.h>
  52#include "dwmac1000.h"
  53
  54#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
  55#define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
  56
  57/* Module parameters */
  58#define TX_TIMEO        5000
  59static int watchdog = TX_TIMEO;
  60module_param(watchdog, int, S_IRUGO | S_IWUSR);
  61MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
  62
  63static int debug = -1;
  64module_param(debug, int, S_IRUGO | S_IWUSR);
  65MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  66
  67static int phyaddr = -1;
  68module_param(phyaddr, int, S_IRUGO);
  69MODULE_PARM_DESC(phyaddr, "Physical device address");
  70
  71#define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
  72#define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
  73
  74static int flow_ctrl = FLOW_OFF;
  75module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
  76MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
  77
  78static int pause = PAUSE_TIME;
  79module_param(pause, int, S_IRUGO | S_IWUSR);
  80MODULE_PARM_DESC(pause, "Flow Control Pause Time");
  81
  82#define TC_DEFAULT 64
  83static int tc = TC_DEFAULT;
  84module_param(tc, int, S_IRUGO | S_IWUSR);
  85MODULE_PARM_DESC(tc, "DMA threshold control value");
  86
  87#define DEFAULT_BUFSIZE 1536
  88static int buf_sz = DEFAULT_BUFSIZE;
  89module_param(buf_sz, int, S_IRUGO | S_IWUSR);
  90MODULE_PARM_DESC(buf_sz, "DMA buffer size");
  91
  92#define STMMAC_RX_COPYBREAK     256
  93
  94static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
  95                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
  96                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
  97
  98#define STMMAC_DEFAULT_LPI_TIMER        1000
  99static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 100module_param(eee_timer, int, S_IRUGO | S_IWUSR);
 101MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
 102#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
 103
 104/* By default the driver will use the ring mode to manage tx and rx descriptors,
  105 * but the user can force the use of the chain mode instead of the ring
 106 */
  107static int chain_mode;
 108module_param(chain_mode, int, S_IRUGO);
 109MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
 110
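/* Usage note (illustrative example, not taken from the original sources):
 * assuming the driver is built as the "stmmac" module, the parameters above
 * can be set at load time, e.g.:
 *   modprobe stmmac buf_sz=4096 chain_mode=1 eee_timer=2000
 * or, when built in, on the kernel command line as "stmmac.watchdog=10000".
 */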
 111static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 112
 113#ifdef CONFIG_DEBUG_FS
 114static int stmmac_init_fs(struct net_device *dev);
 115static void stmmac_exit_fs(struct net_device *dev);
 116#endif
 117
 118#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
 119
 120/**
 121 * stmmac_verify_args - verify the driver parameters.
  122 * Description: it checks the driver parameters and sets a default in case of
 123 * errors.
 124 */
 125static void stmmac_verify_args(void)
 126{
 127        if (unlikely(watchdog < 0))
 128                watchdog = TX_TIMEO;
 129        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
 130                buf_sz = DEFAULT_BUFSIZE;
 131        if (unlikely(flow_ctrl > 1))
 132                flow_ctrl = FLOW_AUTO;
 133        else if (likely(flow_ctrl < 0))
 134                flow_ctrl = FLOW_OFF;
 135        if (unlikely((pause < 0) || (pause > 0xffff)))
 136                pause = PAUSE_TIME;
 137        if (eee_timer < 0)
 138                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 139}
 140
 141/**
 142 * stmmac_disable_all_queues - Disable all queues
 143 * @priv: driver private structure
 144 */
 145static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 146{
 147        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
 148        u32 queue;
 149
 150        for (queue = 0; queue < rx_queues_cnt; queue++) {
 151                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 152
 153                napi_disable(&rx_q->napi);
 154        }
 155}
 156
 157/**
 158 * stmmac_enable_all_queues - Enable all queues
 159 * @priv: driver private structure
 160 */
 161static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 162{
 163        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
 164        u32 queue;
 165
 166        for (queue = 0; queue < rx_queues_cnt; queue++) {
 167                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 168
 169                napi_enable(&rx_q->napi);
 170        }
 171}
 172
 173/**
 174 * stmmac_stop_all_queues - Stop all queues
 175 * @priv: driver private structure
 176 */
 177static void stmmac_stop_all_queues(struct stmmac_priv *priv)
 178{
 179        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
 180        u32 queue;
 181
 182        for (queue = 0; queue < tx_queues_cnt; queue++)
 183                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
 184}
 185
 186/**
 187 * stmmac_start_all_queues - Start all queues
 188 * @priv: driver private structure
 189 */
 190static void stmmac_start_all_queues(struct stmmac_priv *priv)
 191{
 192        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
 193        u32 queue;
 194
 195        for (queue = 0; queue < tx_queues_cnt; queue++)
 196                netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
 197}
 198
 199/**
 200 * stmmac_clk_csr_set - dynamically set the MDC clock
 201 * @priv: driver private structure
 202 * Description: this is to dynamically set the MDC clock according to the csr
 203 * clock input.
 204 * Note:
 205 *      If a specific clk_csr value is passed from the platform
 206 *      this means that the CSR Clock Range selection cannot be
 207 *      changed at run-time and it is fixed (as reported in the driver
  208 *      documentation). Otherwise the driver will try to set the MDC
 209 *      clock dynamically according to the actual clock input.
 210 */
 211static void stmmac_clk_csr_set(struct stmmac_priv *priv)
 212{
 213        u32 clk_rate;
 214
 215        clk_rate = clk_get_rate(priv->plat->stmmac_clk);
 216
  217        /* The platform-provided default clk_csr is assumed valid
  218         * for all cases except the ones handled below.
  219         * For values higher than the IEEE 802.3 specified frequency
  220         * we cannot estimate the proper divider because the frequency
  221         * of clk_csr_i is not known. So we do not change the default
  222         * divider.
  223         */
 224        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
 225                if (clk_rate < CSR_F_35M)
 226                        priv->clk_csr = STMMAC_CSR_20_35M;
 227                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
 228                        priv->clk_csr = STMMAC_CSR_35_60M;
 229                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
 230                        priv->clk_csr = STMMAC_CSR_60_100M;
 231                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
 232                        priv->clk_csr = STMMAC_CSR_100_150M;
 233                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
 234                        priv->clk_csr = STMMAC_CSR_150_250M;
 235                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
 236                        priv->clk_csr = STMMAC_CSR_250_300M;
 237        }
 238}
 239
 240static void print_pkt(unsigned char *buf, int len)
 241{
 242        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
 243        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
 244}
 245
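/**
 * stmmac_tx_avail - get the number of available TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: returns how many descriptors are still free in the TX ring.
 * One slot is always kept unused so that a completely full ring can be
 * distinguished from an empty one (hence the "- 1" below).
 */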
 246static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
 247{
 248        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 249        u32 avail;
 250
 251        if (tx_q->dirty_tx > tx_q->cur_tx)
 252                avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
 253        else
 254                avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
 255
 256        return avail;
 257}
 258
 259/**
  260 * stmmac_rx_dirty - Get the number of dirty RX descriptors to refill
 261 * @priv: driver private structure
 262 * @queue: RX queue index
 263 */
 264static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
 265{
 266        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 267        u32 dirty;
 268
 269        if (rx_q->dirty_rx <= rx_q->cur_rx)
 270                dirty = rx_q->cur_rx - rx_q->dirty_rx;
 271        else
 272                dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
 273
 274        return dirty;
 275}
 276
 277/**
 278 * stmmac_hw_fix_mac_speed - callback for speed selection
 279 * @priv: driver private structure
 280 * Description: on some platforms (e.g. ST), some HW system configuration
 281 * registers have to be set according to the link speed negotiated.
 282 */
 283static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
 284{
 285        struct net_device *ndev = priv->dev;
 286        struct phy_device *phydev = ndev->phydev;
 287
 288        if (likely(priv->plat->fix_mac_speed))
 289                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
 290}
 291
 292/**
  293 * stmmac_enable_eee_mode - check and enter LPI mode
  294 * @priv: driver private structure
  295 * Description: this function checks that all TX queues are idle and, if so,
  296 * puts the MAC transmitter in LPI mode when EEE is enabled.
 297 */
 298static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
 299{
 300        u32 tx_cnt = priv->plat->tx_queues_to_use;
 301        u32 queue;
 302
 303        /* check if all TX queues have the work finished */
 304        for (queue = 0; queue < tx_cnt; queue++) {
 305                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 306
 307                if (tx_q->dirty_tx != tx_q->cur_tx)
 308                        return; /* still unfinished work */
 309        }
 310
 311        /* Check and enter in LPI mode */
 312        if (!priv->tx_path_in_lpi_mode)
 313                priv->hw->mac->set_eee_mode(priv->hw,
 314                                            priv->plat->en_tx_lpi_clockgating);
 315}
 316
 317/**
 318 * stmmac_disable_eee_mode - disable and exit from LPI mode
 319 * @priv: driver private structure
  320 * Description: this function exits LPI mode and disables EEE when the TX
  321 * path is in the LPI state. It is called from the xmit path.
 322 */
 323void stmmac_disable_eee_mode(struct stmmac_priv *priv)
 324{
 325        priv->hw->mac->reset_eee_mode(priv->hw);
 326        del_timer_sync(&priv->eee_ctrl_timer);
 327        priv->tx_path_in_lpi_mode = false;
 328}
 329
 330/**
 331 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 332 * @arg : data hook
 333 * Description:
  334 *  if there is no data transfer and we are not already in the LPI state,
  335 *  the MAC transmitter can be moved to the LPI state.
 336 */
 337static void stmmac_eee_ctrl_timer(unsigned long arg)
 338{
 339        struct stmmac_priv *priv = (struct stmmac_priv *)arg;
 340
 341        stmmac_enable_eee_mode(priv);
 342        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 343}
 344
 345/**
 346 * stmmac_eee_init - init EEE
 347 * @priv: driver private structure
 348 * Description:
 349 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
  350 *  can also manage EEE, this function enables the LPI state and starts the
  351 *  related timer.
 352 */
 353bool stmmac_eee_init(struct stmmac_priv *priv)
 354{
 355        struct net_device *ndev = priv->dev;
 356        unsigned long flags;
 357        bool ret = false;
 358
  359        /* When using the PCS we cannot deal with the PHY registers at this
  360         * stage, so we do not support extra features like EEE.
  361         */
 362        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
 363            (priv->hw->pcs == STMMAC_PCS_TBI) ||
 364            (priv->hw->pcs == STMMAC_PCS_RTBI))
 365                goto out;
 366
 367        /* MAC core supports the EEE feature. */
 368        if (priv->dma_cap.eee) {
 369                int tx_lpi_timer = priv->tx_lpi_timer;
 370
 371                /* Check if the PHY supports EEE */
 372                if (phy_init_eee(ndev->phydev, 1)) {
  373                        /* Handle the case where EEE can no longer be
  374                         * supported at run-time (for example because the
  375                         * link partner caps have changed).
  376                         * In that case the driver disables its own timers.
  377                         */
 378                        spin_lock_irqsave(&priv->lock, flags);
 379                        if (priv->eee_active) {
 380                                netdev_dbg(priv->dev, "disable EEE\n");
 381                                del_timer_sync(&priv->eee_ctrl_timer);
 382                                priv->hw->mac->set_eee_timer(priv->hw, 0,
 383                                                             tx_lpi_timer);
 384                        }
 385                        priv->eee_active = 0;
 386                        spin_unlock_irqrestore(&priv->lock, flags);
 387                        goto out;
 388                }
 389                /* Activate the EEE and start timers */
 390                spin_lock_irqsave(&priv->lock, flags);
 391                if (!priv->eee_active) {
 392                        priv->eee_active = 1;
 393                        setup_timer(&priv->eee_ctrl_timer,
 394                                    stmmac_eee_ctrl_timer,
 395                                    (unsigned long)priv);
 396                        mod_timer(&priv->eee_ctrl_timer,
 397                                  STMMAC_LPI_T(eee_timer));
 398
 399                        priv->hw->mac->set_eee_timer(priv->hw,
 400                                                     STMMAC_DEFAULT_LIT_LS,
 401                                                     tx_lpi_timer);
 402                }
 403                /* Set HW EEE according to the speed */
 404                priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
 405
 406                ret = true;
 407                spin_unlock_irqrestore(&priv->lock, flags);
 408
 409                netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
 410        }
 411out:
 412        return ret;
 413}
 414
 415/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 416 * @priv: driver private structure
 417 * @p : descriptor pointer
 418 * @skb : the socket buffer
 419 * Description :
  420 * This function reads the timestamp from the descriptor and passes it to
  421 * the stack, performing some sanity checks along the way.
 422 */
 423static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 424                                   struct dma_desc *p, struct sk_buff *skb)
 425{
 426        struct skb_shared_hwtstamps shhwtstamp;
 427        u64 ns;
 428
 429        if (!priv->hwts_tx_en)
 430                return;
 431
 432        /* exit if skb doesn't support hw tstamp */
 433        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
 434                return;
 435
 436        /* check tx tstamp status */
 437        if (priv->hw->desc->get_tx_timestamp_status(p)) {
 438                /* get the valid tstamp */
 439                ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
 440
 441                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 442                shhwtstamp.hwtstamp = ns_to_ktime(ns);
 443
 444                netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
 445                /* pass tstamp to stack */
 446                skb_tstamp_tx(skb, &shhwtstamp);
 447        }
 448
 449        return;
 450}
 451
 452/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 453 * @priv: driver private structure
 454 * @p : descriptor pointer
 455 * @np : next descriptor pointer
 456 * @skb : the socket buffer
 457 * Description :
  458 * This function reads the received packet's timestamp from the descriptor
  459 * and passes it to the stack. It also performs some sanity checks.
 460 */
 461static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
 462                                   struct dma_desc *np, struct sk_buff *skb)
 463{
 464        struct skb_shared_hwtstamps *shhwtstamp = NULL;
 465        u64 ns;
 466
 467        if (!priv->hwts_rx_en)
 468                return;
 469
 470        /* Check if timestamp is available */
 471        if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
 472                /* For GMAC4, the valid timestamp is from CTX next desc. */
 473                if (priv->plat->has_gmac4)
 474                        ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
 475                else
 476                        ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
 477
 478                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
 479                shhwtstamp = skb_hwtstamps(skb);
 480                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 481                shhwtstamp->hwtstamp = ns_to_ktime(ns);
 482        } else  {
 483                netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
 484        }
 485}
 486
 487/**
 488 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 489 *  @dev: device pointer.
  490 *  @ifr: An IOCTL-specific structure that can contain a pointer to
  491 *  a proprietary structure used to pass information to the driver.
  492 *  Description:
  493 *  This function configures the MAC to enable/disable time stamping of
  494 *  both outgoing (TX) and incoming (RX) packets, based on user input.
  495 *  Return Value:
  496 *  0 on success and a negative error code on failure.
 497 */
 498static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 499{
 500        struct stmmac_priv *priv = netdev_priv(dev);
 501        struct hwtstamp_config config;
 502        struct timespec64 now;
 503        u64 temp = 0;
 504        u32 ptp_v2 = 0;
 505        u32 tstamp_all = 0;
 506        u32 ptp_over_ipv4_udp = 0;
 507        u32 ptp_over_ipv6_udp = 0;
 508        u32 ptp_over_ethernet = 0;
 509        u32 snap_type_sel = 0;
 510        u32 ts_master_en = 0;
 511        u32 ts_event_en = 0;
 512        u32 value = 0;
 513        u32 sec_inc;
 514
 515        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
 516                netdev_alert(priv->dev, "No support for HW time stamping\n");
 517                priv->hwts_tx_en = 0;
 518                priv->hwts_rx_en = 0;
 519
 520                return -EOPNOTSUPP;
 521        }
 522
 523        if (copy_from_user(&config, ifr->ifr_data,
 524                           sizeof(struct hwtstamp_config)))
 525                return -EFAULT;
 526
 527        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
 528                   __func__, config.flags, config.tx_type, config.rx_filter);
 529
 530        /* reserved for future extensions */
 531        if (config.flags)
 532                return -EINVAL;
 533
 534        if (config.tx_type != HWTSTAMP_TX_OFF &&
 535            config.tx_type != HWTSTAMP_TX_ON)
 536                return -ERANGE;
 537
 538        if (priv->adv_ts) {
 539                switch (config.rx_filter) {
 540                case HWTSTAMP_FILTER_NONE:
 541                        /* time stamp no incoming packet at all */
 542                        config.rx_filter = HWTSTAMP_FILTER_NONE;
 543                        break;
 544
 545                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 546                        /* PTP v1, UDP, any kind of event packet */
 547                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 548                        /* take time stamp for all event messages */
 549                        if (priv->plat->has_gmac4)
 550                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
 551                        else
 552                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 553
 554                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 555                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 556                        break;
 557
 558                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 559                        /* PTP v1, UDP, Sync packet */
 560                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
 561                        /* take time stamp for SYNC messages only */
 562                        ts_event_en = PTP_TCR_TSEVNTENA;
 563
 564                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 565                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 566                        break;
 567
 568                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 569                        /* PTP v1, UDP, Delay_req packet */
 570                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
 571                        /* take time stamp for Delay_Req messages only */
 572                        ts_master_en = PTP_TCR_TSMSTRENA;
 573                        ts_event_en = PTP_TCR_TSEVNTENA;
 574
 575                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 576                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 577                        break;
 578
 579                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 580                        /* PTP v2, UDP, any kind of event packet */
 581                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 582                        ptp_v2 = PTP_TCR_TSVER2ENA;
 583                        /* take time stamp for all event messages */
 584                        if (priv->plat->has_gmac4)
 585                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
 586                        else
 587                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 588
 589                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 590                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 591                        break;
 592
 593                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 594                        /* PTP v2, UDP, Sync packet */
 595                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
 596                        ptp_v2 = PTP_TCR_TSVER2ENA;
 597                        /* take time stamp for SYNC messages only */
 598                        ts_event_en = PTP_TCR_TSEVNTENA;
 599
 600                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 601                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 602                        break;
 603
 604                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 605                        /* PTP v2, UDP, Delay_req packet */
 606                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
 607                        ptp_v2 = PTP_TCR_TSVER2ENA;
 608                        /* take time stamp for Delay_Req messages only */
 609                        ts_master_en = PTP_TCR_TSMSTRENA;
 610                        ts_event_en = PTP_TCR_TSEVNTENA;
 611
 612                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 613                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 614                        break;
 615
 616                case HWTSTAMP_FILTER_PTP_V2_EVENT:
  617                        /* PTP v2/802.1AS, any layer, any kind of event packet */
 618                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 619                        ptp_v2 = PTP_TCR_TSVER2ENA;
 620                        /* take time stamp for all event messages */
 621                        if (priv->plat->has_gmac4)
 622                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
 623                        else
 624                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 625
 626                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 627                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 628                        ptp_over_ethernet = PTP_TCR_TSIPENA;
 629                        break;
 630
 631                case HWTSTAMP_FILTER_PTP_V2_SYNC:
  632                        /* PTP v2/802.1AS, any layer, Sync packet */
 633                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
 634                        ptp_v2 = PTP_TCR_TSVER2ENA;
 635                        /* take time stamp for SYNC messages only */
 636                        ts_event_en = PTP_TCR_TSEVNTENA;
 637
 638                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 639                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 640                        ptp_over_ethernet = PTP_TCR_TSIPENA;
 641                        break;
 642
 643                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
  644                        /* PTP v2/802.1AS, any layer, Delay_req packet */
 645                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
 646                        ptp_v2 = PTP_TCR_TSVER2ENA;
 647                        /* take time stamp for Delay_Req messages only */
 648                        ts_master_en = PTP_TCR_TSMSTRENA;
 649                        ts_event_en = PTP_TCR_TSEVNTENA;
 650
 651                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 652                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 653                        ptp_over_ethernet = PTP_TCR_TSIPENA;
 654                        break;
 655
 656                case HWTSTAMP_FILTER_ALL:
 657                        /* time stamp any incoming packet */
 658                        config.rx_filter = HWTSTAMP_FILTER_ALL;
 659                        tstamp_all = PTP_TCR_TSENALL;
 660                        break;
 661
 662                default:
 663                        return -ERANGE;
 664                }
 665        } else {
 666                switch (config.rx_filter) {
 667                case HWTSTAMP_FILTER_NONE:
 668                        config.rx_filter = HWTSTAMP_FILTER_NONE;
 669                        break;
 670                default:
 671                        /* PTP v1, UDP, any kind of event packet */
 672                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 673                        break;
 674                }
 675        }
 676        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
 677        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 678
 679        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
 680                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
 681        else {
 682                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
 683                         tstamp_all | ptp_v2 | ptp_over_ethernet |
 684                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
 685                         ts_master_en | snap_type_sel);
 686                priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
 687
 688                /* program Sub Second Increment reg */
 689                sec_inc = priv->hw->ptp->config_sub_second_increment(
 690                        priv->ptpaddr, priv->plat->clk_ptp_rate,
 691                        priv->plat->has_gmac4);
 692                temp = div_u64(1000000000ULL, sec_inc);
 693
  694                /* Calculate the default addend value:
  695                 * formula is:
  696                 * addend = (2^32)/freq_div_ratio;
  697                 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc)
  698                 */
 699                temp = (u64)(temp << 32);
 700                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
 701                priv->hw->ptp->config_addend(priv->ptpaddr,
 702                                             priv->default_addend);
 703
 704                /* initialize system time */
 705                ktime_get_real_ts64(&now);
 706
 707                /* lower 32 bits of tv_sec are safe until y2106 */
 708                priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
 709                                            now.tv_nsec);
 710        }
 711
 712        return copy_to_user(ifr->ifr_data, &config,
 713                            sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
 714}
 715
 716/**
 717 * stmmac_init_ptp - init PTP
 718 * @priv: driver private structure
  719 * Description: this verifies whether the HW supports PTPv1 or PTPv2.
 720 * This is done by looking at the HW cap. register.
 721 * This function also registers the ptp driver.
 722 */
 723static int stmmac_init_ptp(struct stmmac_priv *priv)
 724{
 725        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
 726                return -EOPNOTSUPP;
 727
 728        priv->adv_ts = 0;
 729        /* Check if adv_ts can be enabled for dwmac 4.x core */
 730        if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
 731                priv->adv_ts = 1;
 732        /* Dwmac 3.x core with extend_desc can support adv_ts */
 733        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
 734                priv->adv_ts = 1;
 735
 736        if (priv->dma_cap.time_stamp)
 737                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
 738
 739        if (priv->adv_ts)
 740                netdev_info(priv->dev,
 741                            "IEEE 1588-2008 Advanced Timestamp supported\n");
 742
 743        priv->hw->ptp = &stmmac_ptp;
 744        priv->hwts_tx_en = 0;
 745        priv->hwts_rx_en = 0;
 746
 747        stmmac_ptp_register(priv);
 748
 749        return 0;
 750}
 751
 752static void stmmac_release_ptp(struct stmmac_priv *priv)
 753{
 754        if (priv->plat->clk_ptp_ref)
 755                clk_disable_unprepare(priv->plat->clk_ptp_ref);
 756        stmmac_ptp_unregister(priv);
 757}
 758
 759/**
  760 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
  761 *  @priv: driver private structure
      *  @duplex: duplex mode negotiated on the link
  762 *  Description: It is used for configuring the flow control in all queues
  763 */
 764static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
 765{
 766        u32 tx_cnt = priv->plat->tx_queues_to_use;
 767
 768        priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
 769                                 priv->pause, tx_cnt);
 770}
 771
 772/**
 773 * stmmac_adjust_link - adjusts the link parameters
 774 * @dev: net device structure
 775 * Description: this is the helper called by the physical abstraction layer
  776 * drivers to communicate the phy link status. According to the speed and
  777 * duplex, this driver can invoke registered glue-logic as well.
  778 * It also invokes the EEE initialization because it could be needed when
  779 * switching between different networks (that are EEE capable).
 780 */
 781static void stmmac_adjust_link(struct net_device *dev)
 782{
 783        struct stmmac_priv *priv = netdev_priv(dev);
 784        struct phy_device *phydev = dev->phydev;
 785        unsigned long flags;
 786        int new_state = 0;
 787
 788        if (!phydev)
 789                return;
 790
 791        spin_lock_irqsave(&priv->lock, flags);
 792
 793        if (phydev->link) {
 794                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
 795
 796                /* Now we make sure that we can be in full duplex mode.
 797                 * If not, we operate in half-duplex mode. */
 798                if (phydev->duplex != priv->oldduplex) {
 799                        new_state = 1;
 800                        if (!(phydev->duplex))
 801                                ctrl &= ~priv->hw->link.duplex;
 802                        else
 803                                ctrl |= priv->hw->link.duplex;
 804                        priv->oldduplex = phydev->duplex;
 805                }
 806                /* Flow Control operation */
 807                if (phydev->pause)
 808                        stmmac_mac_flow_ctrl(priv, phydev->duplex);
 809
 810                if (phydev->speed != priv->speed) {
 811                        new_state = 1;
 812                        switch (phydev->speed) {
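                        /* Note (best-effort description of the core link
                         * bits): the cases below program MAC_CTRL_REG, where
                         * link.port typically selects the MII (10/100)
                         * interface when set and the GMII/1000 path when
                         * cleared, while link.speed picks 100 Mb/s over
                         * 10 Mb/s once the MII port is selected.
                         */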
 813                        case 1000:
 814                                if (priv->plat->has_gmac ||
 815                                    priv->plat->has_gmac4)
 816                                        ctrl &= ~priv->hw->link.port;
 817                                break;
 818                        case 100:
 819                                if (priv->plat->has_gmac ||
 820                                    priv->plat->has_gmac4) {
 821                                        ctrl |= priv->hw->link.port;
 822                                        ctrl |= priv->hw->link.speed;
 823                                } else {
 824                                        ctrl &= ~priv->hw->link.port;
 825                                }
 826                                break;
 827                        case 10:
 828                                if (priv->plat->has_gmac ||
 829                                    priv->plat->has_gmac4) {
 830                                        ctrl |= priv->hw->link.port;
 831                                        ctrl &= ~(priv->hw->link.speed);
 832                                } else {
 833                                        ctrl &= ~priv->hw->link.port;
 834                                }
 835                                break;
 836                        default:
 837                                netif_warn(priv, link, priv->dev,
 838                                           "broken speed: %d\n", phydev->speed);
 839                                phydev->speed = SPEED_UNKNOWN;
 840                                break;
 841                        }
 842                        if (phydev->speed != SPEED_UNKNOWN)
 843                                stmmac_hw_fix_mac_speed(priv);
 844                        priv->speed = phydev->speed;
 845                }
 846
 847                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
 848
 849                if (!priv->oldlink) {
 850                        new_state = 1;
 851                        priv->oldlink = 1;
 852                }
 853        } else if (priv->oldlink) {
 854                new_state = 1;
 855                priv->oldlink = 0;
 856                priv->speed = SPEED_UNKNOWN;
 857                priv->oldduplex = DUPLEX_UNKNOWN;
 858        }
 859
 860        if (new_state && netif_msg_link(priv))
 861                phy_print_status(phydev);
 862
 863        spin_unlock_irqrestore(&priv->lock, flags);
 864
 865        if (phydev->is_pseudo_fixed_link)
  866                /* Stop the PHY layer from calling the adjust_link hook
  867                 * when a switch is attached to the stmmac driver.
  868                 */
 869                phydev->irq = PHY_IGNORE_INTERRUPT;
 870        else
 871                /* At this stage, init the EEE if supported.
 872                 * Never called in case of fixed_link.
 873                 */
 874                priv->eee_enabled = stmmac_eee_init(priv);
 875}
 876
 877/**
 878 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 879 * @priv: driver private structure
 880 * Description: this is to verify if the HW supports the PCS.
  881 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
 882 * configured for the TBI, RTBI, or SGMII PHY interface.
 883 */
 884static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 885{
 886        int interface = priv->plat->interface;
 887
 888        if (priv->dma_cap.pcs) {
 889                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
 890                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
 891                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
 892                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
 893                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
 894                        priv->hw->pcs = STMMAC_PCS_RGMII;
 895                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
 896                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
 897                        priv->hw->pcs = STMMAC_PCS_SGMII;
 898                }
 899        }
 900}
 901
 902/**
 903 * stmmac_init_phy - PHY initialization
 904 * @dev: net device structure
 905 * Description: it initializes the driver's PHY state, and attaches the PHY
 906 * to the mac driver.
 907 *  Return value:
  908 *  0 on success, a negative error code on failure
 909 */
 910static int stmmac_init_phy(struct net_device *dev)
 911{
 912        struct stmmac_priv *priv = netdev_priv(dev);
 913        struct phy_device *phydev;
 914        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
 915        char bus_id[MII_BUS_ID_SIZE];
 916        int interface = priv->plat->interface;
 917        int max_speed = priv->plat->max_speed;
 918        priv->oldlink = 0;
 919        priv->speed = SPEED_UNKNOWN;
 920        priv->oldduplex = DUPLEX_UNKNOWN;
 921
 922        if (priv->plat->phy_node) {
 923                phydev = of_phy_connect(dev, priv->plat->phy_node,
 924                                        &stmmac_adjust_link, 0, interface);
 925        } else {
 926                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
 927                         priv->plat->bus_id);
 928
 929                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 930                         priv->plat->phy_addr);
 931                netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
 932                           phy_id_fmt);
 933
 934                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
 935                                     interface);
 936        }
 937
 938        if (IS_ERR_OR_NULL(phydev)) {
 939                netdev_err(priv->dev, "Could not attach to PHY\n");
 940                if (!phydev)
 941                        return -ENODEV;
 942
 943                return PTR_ERR(phydev);
 944        }
 945
 946        /* Stop Advertising 1000BASE Capability if interface is not GMII */
 947        if ((interface == PHY_INTERFACE_MODE_MII) ||
 948            (interface == PHY_INTERFACE_MODE_RMII) ||
  949            (max_speed < 1000 && max_speed > 0))
 950                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
 951                                         SUPPORTED_1000baseT_Full);
 952
 953        /*
 954         * Broken HW is sometimes missing the pull-up resistor on the
 955         * MDIO line, which results in reads to non-existent devices returning
 956         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
 957         * device as well.
 958         * Note: phydev->phy_id is the result of reading the UID PHY registers.
 959         */
 960        if (!priv->plat->phy_node && phydev->phy_id == 0) {
 961                phy_disconnect(phydev);
 962                return -ENODEV;
 963        }
 964
 965        /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
  966         * subsequent PHY polling; make sure we force a link transition if
 967         * we have a UP/DOWN/UP transition
 968         */
 969        if (phydev->is_pseudo_fixed_link)
 970                phydev->irq = PHY_POLL;
 971
 972        phy_attached_info(phydev);
 973        return 0;
 974}
 975
 976static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 977{
 978        u32 rx_cnt = priv->plat->rx_queues_to_use;
 979        void *head_rx;
 980        u32 queue;
 981
 982        /* Display RX rings */
 983        for (queue = 0; queue < rx_cnt; queue++) {
 984                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 985
 986                pr_info("\tRX Queue %u rings\n", queue);
 987
 988                if (priv->extend_desc)
 989                        head_rx = (void *)rx_q->dma_erx;
 990                else
 991                        head_rx = (void *)rx_q->dma_rx;
 992
 993                /* Display RX ring */
 994                priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
 995        }
 996}
 997
 998static void stmmac_display_tx_rings(struct stmmac_priv *priv)
 999{
1000        u32 tx_cnt = priv->plat->tx_queues_to_use;
1001        void *head_tx;
1002        u32 queue;
1003
1004        /* Display TX rings */
1005        for (queue = 0; queue < tx_cnt; queue++) {
1006                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1007
 1008                pr_info("\tTX Queue %u rings\n", queue);
1009
1010                if (priv->extend_desc)
1011                        head_tx = (void *)tx_q->dma_etx;
1012                else
1013                        head_tx = (void *)tx_q->dma_tx;
1014
1015                priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1016        }
1017}
1018
1019static void stmmac_display_rings(struct stmmac_priv *priv)
1020{
1021        /* Display RX ring */
1022        stmmac_display_rx_rings(priv);
1023
1024        /* Display TX ring */
1025        stmmac_display_tx_rings(priv);
1026}
1027
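/**
 * stmmac_set_bfsize - pick the DMA RX buffer size for a given MTU
 * @mtu: interface MTU
 * @bufsize: current buffer size (only used as the initial value of ret)
 * Description: maps the MTU to the smallest supported DMA buffer size able
 * to hold it (default size, 2 KiB, 4 KiB or 8 KiB).
 */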
1028static int stmmac_set_bfsize(int mtu, int bufsize)
1029{
1030        int ret = bufsize;
1031
1032        if (mtu >= BUF_SIZE_4KiB)
1033                ret = BUF_SIZE_8KiB;
1034        else if (mtu >= BUF_SIZE_2KiB)
1035                ret = BUF_SIZE_4KiB;
1036        else if (mtu > DEFAULT_BUFSIZE)
1037                ret = BUF_SIZE_2KiB;
1038        else
1039                ret = DEFAULT_BUFSIZE;
1040
1041        return ret;
1042}
1043
1044/**
1045 * stmmac_clear_rx_descriptors - clear RX descriptors
1046 * @priv: driver private structure
1047 * @queue: RX queue index
1048 * Description: this function is called to clear the RX descriptors
 1049 * whether basic or extended descriptors are in use.
1050 */
1051static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1052{
1053        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1054        int i;
1055
1056        /* Clear the RX descriptors */
1057        for (i = 0; i < DMA_RX_SIZE; i++)
1058                if (priv->extend_desc)
1059                        priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1060                                                     priv->use_riwt, priv->mode,
1061                                                     (i == DMA_RX_SIZE - 1));
1062                else
1063                        priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1064                                                     priv->use_riwt, priv->mode,
1065                                                     (i == DMA_RX_SIZE - 1));
1066}
1067
1068/**
1069 * stmmac_clear_tx_descriptors - clear tx descriptors
1070 * @priv: driver private structure
1071 * @queue: TX queue index.
1072 * Description: this function is called to clear the TX descriptors
 1073 * whether basic or extended descriptors are in use.
1074 */
1075static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1076{
1077        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1078        int i;
1079
1080        /* Clear the TX descriptors */
1081        for (i = 0; i < DMA_TX_SIZE; i++)
1082                if (priv->extend_desc)
1083                        priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1084                                                     priv->mode,
1085                                                     (i == DMA_TX_SIZE - 1));
1086                else
1087                        priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1088                                                     priv->mode,
1089                                                     (i == DMA_TX_SIZE - 1));
1090}
1091
1092/**
1093 * stmmac_clear_descriptors - clear descriptors
1094 * @priv: driver private structure
1095 * Description: this function is called to clear the TX and RX descriptors
 1096 * whether basic or extended descriptors are in use.
1097 */
1098static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1099{
1100        u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1101        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1102        u32 queue;
1103
1104        /* Clear the RX descriptors */
1105        for (queue = 0; queue < rx_queue_cnt; queue++)
1106                stmmac_clear_rx_descriptors(priv, queue);
1107
1108        /* Clear the TX descriptors */
1109        for (queue = 0; queue < tx_queue_cnt; queue++)
1110                stmmac_clear_tx_descriptors(priv, queue);
1111}
1112
1113/**
1114 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1115 * @priv: driver private structure
1116 * @p: descriptor pointer
1117 * @i: descriptor index
1118 * @flags: gfp flag
1119 * @queue: RX queue index
1120 * Description: this function is called to allocate a receive buffer, perform
1121 * the DMA mapping and init the descriptor.
1122 */
1123static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1124                                  int i, gfp_t flags, u32 queue)
1125{
1126        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1127        struct sk_buff *skb;
1128
1129        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1130        if (!skb) {
1131                netdev_err(priv->dev,
1132                           "%s: Rx init fails; skb is NULL\n", __func__);
1133                return -ENOMEM;
1134        }
1135        rx_q->rx_skbuff[i] = skb;
1136        rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1137                                                priv->dma_buf_sz,
1138                                                DMA_FROM_DEVICE);
1139        if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1140                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1141                dev_kfree_skb_any(skb);
1142                return -EINVAL;
1143        }
1144
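        /* DWMAC4 and newer cores carry the RX buffer address in des0,
         * while older cores use des2.
         */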
1145        if (priv->synopsys_id >= DWMAC_CORE_4_00)
1146                p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1147        else
1148                p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1149
1150        if ((priv->hw->mode->init_desc3) &&
1151            (priv->dma_buf_sz == BUF_SIZE_16KiB))
1152                priv->hw->mode->init_desc3(p);
1153
1154        return 0;
1155}
1156
1157/**
 1158 * stmmac_free_rx_buffer - free a single RX DMA buffer
1159 * @priv: private structure
1160 * @queue: RX queue index
1161 * @i: buffer index.
1162 */
1163static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1164{
1165        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1166
1167        if (rx_q->rx_skbuff[i]) {
1168                dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1169                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
1170                dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1171        }
1172        rx_q->rx_skbuff[i] = NULL;
1173}
1174
1175/**
 1176 * stmmac_free_tx_buffer - free a single TX DMA buffer
 1177 * @priv: private structure
 1178 * @queue: TX queue index
1179 * @i: buffer index.
1180 */
1181static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1182{
1183        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1184
1185        if (tx_q->tx_skbuff_dma[i].buf) {
1186                if (tx_q->tx_skbuff_dma[i].map_as_page)
1187                        dma_unmap_page(priv->device,
1188                                       tx_q->tx_skbuff_dma[i].buf,
1189                                       tx_q->tx_skbuff_dma[i].len,
1190                                       DMA_TO_DEVICE);
1191                else
1192                        dma_unmap_single(priv->device,
1193                                         tx_q->tx_skbuff_dma[i].buf,
1194                                         tx_q->tx_skbuff_dma[i].len,
1195                                         DMA_TO_DEVICE);
1196        }
1197
1198        if (tx_q->tx_skbuff[i]) {
1199                dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1200                tx_q->tx_skbuff[i] = NULL;
1201                tx_q->tx_skbuff_dma[i].buf = 0;
1202                tx_q->tx_skbuff_dma[i].map_as_page = false;
1203        }
1204}
1205
1206/**
1207 * init_dma_rx_desc_rings - init the RX descriptor rings
1208 * @dev: net device structure
1209 * @flags: gfp flag.
1210 * Description: this function initializes the DMA RX descriptors
1211 * and allocates the socket buffers. It supports the chained and ring
1212 * modes.
1213 */
1214static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1215{
1216        struct stmmac_priv *priv = netdev_priv(dev);
1217        u32 rx_count = priv->plat->rx_queues_to_use;
1218        unsigned int bfsize = 0;
1219        int ret = -ENOMEM;
1220        int queue;
1221        int i;
1222
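        /* If the MTU requires it, the ring-mode code can request a full
         * 16 KiB buffer through set_16kib_bfsize(); otherwise the buffer
         * size is derived from the MTU.
         */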
1223        if (priv->hw->mode->set_16kib_bfsize)
1224                bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1225
1226        if (bfsize < BUF_SIZE_16KiB)
1227                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1228
1229        priv->dma_buf_sz = bfsize;
1230
1231        /* RX INITIALIZATION */
1232        netif_dbg(priv, probe, priv->dev,
1233                  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1234
1235        for (queue = 0; queue < rx_count; queue++) {
1236                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1237
1238                netif_dbg(priv, probe, priv->dev,
1239                          "(%s) dma_rx_phy=0x%08x\n", __func__,
1240                          (u32)rx_q->dma_rx_phy);
1241
1242                for (i = 0; i < DMA_RX_SIZE; i++) {
1243                        struct dma_desc *p;
1244
1245                        if (priv->extend_desc)
1246                                p = &((rx_q->dma_erx + i)->basic);
1247                        else
1248                                p = rx_q->dma_rx + i;
1249
1250                        ret = stmmac_init_rx_buffers(priv, p, i, flags,
1251                                                     queue);
1252                        if (ret)
1253                                goto err_init_rx_buffers;
1254
1255                        netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1256                                  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1257                                  (unsigned int)rx_q->rx_skbuff_dma[i]);
1258                }
1259
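                /* The loop above completed, so i == DMA_RX_SIZE here and
                 * dirty_rx starts at 0: nothing needs to be refilled yet.
                 */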
1260                rx_q->cur_rx = 0;
1261                rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1262
1263                stmmac_clear_rx_descriptors(priv, queue);
1264
1265                /* Setup the chained descriptor addresses */
1266                if (priv->mode == STMMAC_CHAIN_MODE) {
1267                        if (priv->extend_desc)
1268                                priv->hw->mode->init(rx_q->dma_erx,
1269                                                     rx_q->dma_rx_phy,
1270                                                     DMA_RX_SIZE, 1);
1271                        else
1272                                priv->hw->mode->init(rx_q->dma_rx,
1273                                                     rx_q->dma_rx_phy,
1274                                                     DMA_RX_SIZE, 0);
1275                }
1276        }
1277
1278        buf_sz = bfsize;
1279
1280        return 0;
1281
1282err_init_rx_buffers:
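        /* Unwind: free the buffers already allocated for the current,
         * partially initialized queue, then all buffers of the queues that
         * were fully initialized before it.
         */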
1283        while (queue >= 0) {
1284                while (--i >= 0)
1285                        stmmac_free_rx_buffer(priv, queue, i);
1286
1287                if (queue == 0)
1288                        break;
1289
1290                i = DMA_RX_SIZE;
1291                queue--;
1292        }
1293
1294        return ret;
1295}
1296
1297/**
1298 * init_dma_tx_desc_rings - init the TX descriptor rings
1299 * @dev: net device structure.
1300 * Description: this function initializes the DMA TX descriptors
 1301 * and the related software state. It supports the chained and ring
1302 * modes.
1303 */
1304static int init_dma_tx_desc_rings(struct net_device *dev)
1305{
1306        struct stmmac_priv *priv = netdev_priv(dev);
1307        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1308        u32 queue;
1309        int i;
1310
1311        for (queue = 0; queue < tx_queue_cnt; queue++) {
1312                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1313
1314                netif_dbg(priv, probe, priv->dev,
1315                          "(%s) dma_tx_phy=0x%08x\n", __func__,
1316                         (u32)tx_q->dma_tx_phy);
1317
1318                /* Setup the chained descriptor addresses */
1319                if (priv->mode == STMMAC_CHAIN_MODE) {
1320                        if (priv->extend_desc)
1321                                priv->hw->mode->init(tx_q->dma_etx,
1322                                                     tx_q->dma_tx_phy,
1323                                                     DMA_TX_SIZE, 1);
1324                        else
1325                                priv->hw->mode->init(tx_q->dma_tx,
1326                                                     tx_q->dma_tx_phy,
1327                                                     DMA_TX_SIZE, 0);
1328                }
1329
1330                for (i = 0; i < DMA_TX_SIZE; i++) {
1331                        struct dma_desc *p;
1332                        if (priv->extend_desc)
1333                                p = &((tx_q->dma_etx + i)->basic);
1334                        else
1335                                p = tx_q->dma_tx + i;
1336
1337                        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1338                                p->des0 = 0;
1339                                p->des1 = 0;
1340                                p->des2 = 0;
1341                                p->des3 = 0;
1342                        } else {
1343                                p->des2 = 0;
1344                        }
1345
1346                        tx_q->tx_skbuff_dma[i].buf = 0;
1347                        tx_q->tx_skbuff_dma[i].map_as_page = false;
1348                        tx_q->tx_skbuff_dma[i].len = 0;
1349                        tx_q->tx_skbuff_dma[i].last_segment = false;
1350                        tx_q->tx_skbuff[i] = NULL;
1351                }
1352
1353                tx_q->dirty_tx = 0;
1354                tx_q->cur_tx = 0;
1355
1356                netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1357        }
1358
1359        return 0;
1360}
1361
1362/**
1363 * init_dma_desc_rings - init the RX/TX descriptor rings
1364 * @dev: net device structure
1365 * @flags: gfp flag.
1366 * Description: this function initializes the DMA RX/TX descriptors
1367 * and allocates the socket buffers. It supports the chained and ring
1368 * modes.
1369 */
1370static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1371{
1372        struct stmmac_priv *priv = netdev_priv(dev);
1373        int ret;
1374
1375        ret = init_dma_rx_desc_rings(dev, flags);
1376        if (ret)
1377                return ret;
1378
1379        ret = init_dma_tx_desc_rings(dev);
1380
1381        stmmac_clear_descriptors(priv);
1382
1383        if (netif_msg_hw(priv))
1384                stmmac_display_rings(priv);
1385
1386        return ret;
1387}
1388
1389/**
1390 * dma_free_rx_skbufs - free RX dma buffers
1391 * @priv: private structure
1392 * @queue: RX queue index
1393 */
1394static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1395{
1396        int i;
1397
1398        for (i = 0; i < DMA_RX_SIZE; i++)
1399                stmmac_free_rx_buffer(priv, queue, i);
1400}
1401
1402/**
1403 * dma_free_tx_skbufs - free TX dma buffers
1404 * @priv: private structure
1405 * @queue: TX queue index
1406 */
1407static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1408{
1409        int i;
1410
1411        for (i = 0; i < DMA_TX_SIZE; i++)
1412                stmmac_free_tx_buffer(priv, queue, i);
1413}
1414
1415/**
1416 * free_dma_rx_desc_resources - free RX dma desc resources
1417 * @priv: private structure
1418 */
1419static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1420{
1421        u32 rx_count = priv->plat->rx_queues_to_use;
1422        u32 queue;
1423
1424        /* Free RX queue resources */
1425        for (queue = 0; queue < rx_count; queue++) {
1426                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1427
1428                /* Release the DMA RX socket buffers */
1429                dma_free_rx_skbufs(priv, queue);
1430
1431                /* Free DMA regions of consistent memory previously allocated */
1432                if (!priv->extend_desc)
1433                        dma_free_coherent(priv->device,
1434                                          DMA_RX_SIZE * sizeof(struct dma_desc),
1435                                          rx_q->dma_rx, rx_q->dma_rx_phy);
1436                else
1437                        dma_free_coherent(priv->device, DMA_RX_SIZE *
1438                                          sizeof(struct dma_extended_desc),
1439                                          rx_q->dma_erx, rx_q->dma_rx_phy);
1440
1441                kfree(rx_q->rx_skbuff_dma);
1442                kfree(rx_q->rx_skbuff);
1443        }
1444}
1445
1446/**
1447 * free_dma_tx_desc_resources - free TX dma desc resources
1448 * @priv: private structure
1449 */
1450static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1451{
1452        u32 tx_count = priv->plat->tx_queues_to_use;
1453        u32 queue = 0;
1454
1455        /* Free TX queue resources */
1456        for (queue = 0; queue < tx_count; queue++) {
1457                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1458
1459                /* Release the DMA TX socket buffers */
1460                dma_free_tx_skbufs(priv, queue);
1461
1462                /* Free DMA regions of consistent memory previously allocated */
1463                if (!priv->extend_desc)
1464                        dma_free_coherent(priv->device,
1465                                          DMA_TX_SIZE * sizeof(struct dma_desc),
1466                                          tx_q->dma_tx, tx_q->dma_tx_phy);
1467                else
1468                        dma_free_coherent(priv->device, DMA_TX_SIZE *
1469                                          sizeof(struct dma_extended_desc),
1470                                          tx_q->dma_etx, tx_q->dma_tx_phy);
1471
1472                kfree(tx_q->tx_skbuff_dma);
1473                kfree(tx_q->tx_skbuff);
1474        }
1475}
1476
1477/**
1478 * alloc_dma_rx_desc_resources - alloc RX resources.
1479 * @priv: private structure
1480 * Description: according to which descriptor can be used (extended or basic)
1481 * this function allocates the resources for the RX path, i.e. the per-entry
1482 * buffer bookkeeping arrays and the RX DMA descriptor rings; the RX socket
1483 * buffers are pre-allocated at ring init time to allow a zero-copy mechanism.
1484 */
1485static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1486{
1487        u32 rx_count = priv->plat->rx_queues_to_use;
1488        int ret = -ENOMEM;
1489        u32 queue;
1490
1491        /* RX queues buffers and DMA */
1492        for (queue = 0; queue < rx_count; queue++) {
1493                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1494
1495                rx_q->queue_index = queue;
1496                rx_q->priv_data = priv;
1497
1498                rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1499                                                    sizeof(dma_addr_t),
1500                                                    GFP_KERNEL);
1501                if (!rx_q->rx_skbuff_dma)
1502                        return -ENOMEM;
1503
1504                rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1505                                                sizeof(struct sk_buff *),
1506                                                GFP_KERNEL);
1507                if (!rx_q->rx_skbuff)
1508                        goto err_dma;
1509
1510                if (priv->extend_desc) {
1511                        rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1512                                                            DMA_RX_SIZE *
1513                                                            sizeof(struct
1514                                                            dma_extended_desc),
1515                                                            &rx_q->dma_rx_phy,
1516                                                            GFP_KERNEL);
1517                        if (!rx_q->dma_erx)
1518                                goto err_dma;
1519
1520                } else {
1521                        rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1522                                                           DMA_RX_SIZE *
1523                                                           sizeof(struct
1524                                                           dma_desc),
1525                                                           &rx_q->dma_rx_phy,
1526                                                           GFP_KERNEL);
1527                        if (!rx_q->dma_rx)
1528                                goto err_dma;
1529                }
1530        }
1531
1532        return 0;
1533
1534err_dma:
1535        free_dma_rx_desc_resources(priv);
1536
1537        return ret;
1538}
1539
1540/**
1541 * alloc_dma_tx_desc_resources - alloc TX resources.
1542 * @priv: private structure
1543 * Description: according to which descriptor can be used (extended or basic)
1544 * this function allocates the resources for the TX path, i.e. the per-entry
1545 * buffer bookkeeping arrays (tx_skbuff, tx_skbuff_dma) and the TX DMA
1546 * descriptor rings.
1547 */
1548static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1549{
1550        u32 tx_count = priv->plat->tx_queues_to_use;
1551        int ret = -ENOMEM;
1552        u32 queue;
1553
1554        /* TX queues buffers and DMA */
1555        for (queue = 0; queue < tx_count; queue++) {
1556                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1557
1558                tx_q->queue_index = queue;
1559                tx_q->priv_data = priv;
1560
1561                tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1562                                                    sizeof(*tx_q->tx_skbuff_dma),
1563                                                    GFP_KERNEL);
1564                if (!tx_q->tx_skbuff_dma)
1565                        return -ENOMEM;
1566
1567                tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1568                                                sizeof(struct sk_buff *),
1569                                                GFP_KERNEL);
1570                if (!tx_q->tx_skbuff)
1571                        goto err_dma_buffers;
1572
1573                if (priv->extend_desc) {
1574                        tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1575                                                            DMA_TX_SIZE *
1576                                                            sizeof(struct
1577                                                            dma_extended_desc),
1578                                                            &tx_q->dma_tx_phy,
1579                                                            GFP_KERNEL);
1580                        if (!tx_q->dma_etx)
1581                                goto err_dma_buffers;
1582                } else {
1583                        tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1584                                                           DMA_TX_SIZE *
1585                                                           sizeof(struct
1586                                                                  dma_desc),
1587                                                           &tx_q->dma_tx_phy,
1588                                                           GFP_KERNEL);
1589                        if (!tx_q->dma_tx)
1590                                goto err_dma_buffers;
1591                }
1592        }
1593
1594        return 0;
1595
1596err_dma_buffers:
1597        free_dma_tx_desc_resources(priv);
1598
1599        return ret;
1600}
1601
1602/**
1603 * alloc_dma_desc_resources - alloc TX/RX resources.
1604 * @priv: private structure
1605 * Description: according to which descriptor can be used (extended or basic)
1606 * this function allocates the resources for the TX and RX paths. In case of
1607 * reception, for example, the RX socket buffers are pre-allocated in order
1608 * to allow a zero-copy mechanism.
1609 */
1610static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1611{
1612        /* RX Allocation */
1613        int ret = alloc_dma_rx_desc_resources(priv);
1614
1615        if (ret)
1616                return ret;
1617
1618        ret = alloc_dma_tx_desc_resources(priv);
1619
1620        return ret;
1621}
1622
1623/**
1624 * free_dma_desc_resources - free dma desc resources
1625 * @priv: private structure
1626 */
1627static void free_dma_desc_resources(struct stmmac_priv *priv)
1628{
1629        /* Release the DMA RX socket buffers */
1630        free_dma_rx_desc_resources(priv);
1631
1632        /* Release the DMA TX socket buffers */
1633        free_dma_tx_desc_resources(priv);
1634}
1635
1636/**
1637 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1638 *  @priv: driver private structure
1639 *  Description: It is used for enabling the rx queues in the MAC
1640 */
1641static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1642{
1643        u32 rx_queues_count = priv->plat->rx_queues_to_use;
1644        int queue;
1645        u8 mode;
1646
1647        for (queue = 0; queue < rx_queues_count; queue++) {
1648                mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1649                priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1650        }
1651}
1652
1653/**
1654 * stmmac_start_rx_dma - start RX DMA channel
1655 * @priv: driver private structure
1656 * @chan: RX channel index
1657 * Description:
1658 * This starts a RX DMA channel
1659 */
1660static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1661{
1662        netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1663        priv->hw->dma->start_rx(priv->ioaddr, chan);
1664}
1665
1666/**
1667 * stmmac_start_tx_dma - start TX DMA channel
1668 * @priv: driver private structure
1669 * @chan: TX channel index
1670 * Description:
1671 * This starts a TX DMA channel
1672 */
1673static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1674{
1675        netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1676        priv->hw->dma->start_tx(priv->ioaddr, chan);
1677}
1678
1679/**
1680 * stmmac_stop_rx_dma - stop RX DMA channel
1681 * @priv: driver private structure
1682 * @chan: RX channel index
1683 * Description:
1684 * This stops a RX DMA channel
1685 */
1686static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1687{
1688        netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1689        priv->hw->dma->stop_rx(priv->ioaddr, chan);
1690}
1691
1692/**
1693 * stmmac_stop_tx_dma - stop TX DMA channel
1694 * @priv: driver private structure
1695 * @chan: TX channel index
1696 * Description:
1697 * This stops a TX DMA channel
1698 */
1699static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1700{
1701        netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1702        priv->hw->dma->stop_tx(priv->ioaddr, chan);
1703}
1704
1705/**
1706 * stmmac_start_all_dma - start all RX and TX DMA channels
1707 * @priv: driver private structure
1708 * Description:
1709 * This starts all the RX and TX DMA channels
1710 */
1711static void stmmac_start_all_dma(struct stmmac_priv *priv)
1712{
1713        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1714        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1715        u32 chan = 0;
1716
1717        for (chan = 0; chan < rx_channels_count; chan++)
1718                stmmac_start_rx_dma(priv, chan);
1719
1720        for (chan = 0; chan < tx_channels_count; chan++)
1721                stmmac_start_tx_dma(priv, chan);
1722}
1723
1724/**
1725 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1726 * @priv: driver private structure
1727 * Description:
1728 * This stops the RX and TX DMA channels
1729 */
1730static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1731{
1732        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1733        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1734        u32 chan = 0;
1735
1736        for (chan = 0; chan < rx_channels_count; chan++)
1737                stmmac_stop_rx_dma(priv, chan);
1738
1739        for (chan = 0; chan < tx_channels_count; chan++)
1740                stmmac_stop_tx_dma(priv, chan);
1741}
1742
1743/**
1744 *  stmmac_dma_operation_mode - HW DMA operation mode
1745 *  @priv: driver private structure
1746 *  Description: it is used for configuring the DMA operation mode register in
1747 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1748 */
1749static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1750{
1751        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1752        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1753        int rxfifosz = priv->plat->rx_fifo_size;
1754        u32 txmode = 0;
1755        u32 rxmode = 0;
1756        u32 chan = 0;
1757
1758        if (rxfifosz == 0)
1759                rxfifosz = priv->dma_cap.rx_fifo_size;
1760
1761        if (priv->plat->force_thresh_dma_mode) {
1762                txmode = tc;
1763                rxmode = tc;
1764        } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1765                /*
1766                 * In case of GMAC, Store-and-Forward (SF) mode can be
1767                 * enabled to perform the TX COE in HW. This depends on:
1768                 * 1) TX COE being actually supported;
1769                 * 2) there being no buggy Jumbo frame support that
1770                 *    requires the checksum not to be inserted in the TDES.
1771                 */
1772                txmode = SF_DMA_MODE;
1773                rxmode = SF_DMA_MODE;
1774                priv->xstats.threshold = SF_DMA_MODE;
1775        } else {
1776                txmode = tc;
1777                rxmode = SF_DMA_MODE;
1778        }
1779
1780        /* configure all channels */
1781        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1782                for (chan = 0; chan < rx_channels_count; chan++)
1783                        priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1784                                                   rxfifosz);
1785
1786                for (chan = 0; chan < tx_channels_count; chan++)
1787                        priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1788        } else {
1789                priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1790                                        rxfifosz);
1791        }
1792}
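
    /*
     * Editorial summary of the operation mode selection above:
     *
     *   force_thresh_dma_mode        -> txmode = tc,          rxmode = tc
     *   force_sf_dma_mode || tx_coe  -> txmode = SF_DMA_MODE, rxmode = SF_DMA_MODE
     *   otherwise                    -> txmode = tc,          rxmode = SF_DMA_MODE
     */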
1793
1794/**
1795 * stmmac_tx_clean - to manage the transmission completion
1796 * @priv: driver private structure
1797 * @queue: TX queue index
1798 * Description: it reclaims the transmit resources after transmission completes.
1799 */
1800static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1801{
1802        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1803        unsigned int bytes_compl = 0, pkts_compl = 0;
1804        unsigned int entry = tx_q->dirty_tx;
1805
1806        netif_tx_lock(priv->dev);
1807
1808        priv->xstats.tx_clean++;
1809
1810        while (entry != tx_q->cur_tx) {
1811                struct sk_buff *skb = tx_q->tx_skbuff[entry];
1812                struct dma_desc *p;
1813                int status;
1814
1815                if (priv->extend_desc)
1816                        p = (struct dma_desc *)(tx_q->dma_etx + entry);
1817                else
1818                        p = tx_q->dma_tx + entry;
1819
1820                status = priv->hw->desc->tx_status(&priv->dev->stats,
1821                                                      &priv->xstats, p,
1822                                                      priv->ioaddr);
1823                /* Check if the descriptor is owned by the DMA */
1824                if (unlikely(status & tx_dma_own))
1825                        break;
1826
1827                /* Just consider the last segment and ...*/
1828                if (likely(!(status & tx_not_ls))) {
1829                        /* ... verify the status error condition */
1830                        if (unlikely(status & tx_err)) {
1831                                priv->dev->stats.tx_errors++;
1832                        } else {
1833                                priv->dev->stats.tx_packets++;
1834                                priv->xstats.tx_pkt_n++;
1835                        }
1836                        stmmac_get_tx_hwtstamp(priv, p, skb);
1837                }
1838
1839                if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1840                        if (tx_q->tx_skbuff_dma[entry].map_as_page)
1841                                dma_unmap_page(priv->device,
1842                                               tx_q->tx_skbuff_dma[entry].buf,
1843                                               tx_q->tx_skbuff_dma[entry].len,
1844                                               DMA_TO_DEVICE);
1845                        else
1846                                dma_unmap_single(priv->device,
1847                                                 tx_q->tx_skbuff_dma[entry].buf,
1848                                                 tx_q->tx_skbuff_dma[entry].len,
1849                                                 DMA_TO_DEVICE);
1850                        tx_q->tx_skbuff_dma[entry].buf = 0;
1851                        tx_q->tx_skbuff_dma[entry].len = 0;
1852                        tx_q->tx_skbuff_dma[entry].map_as_page = false;
1853                }
1854
1855                if (priv->hw->mode->clean_desc3)
1856                        priv->hw->mode->clean_desc3(tx_q, p);
1857
1858                tx_q->tx_skbuff_dma[entry].last_segment = false;
1859                tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1860
1861                if (likely(skb != NULL)) {
1862                        pkts_compl++;
1863                        bytes_compl += skb->len;
1864                        dev_consume_skb_any(skb);
1865                        tx_q->tx_skbuff[entry] = NULL;
1866                }
1867
1868                priv->hw->desc->release_tx_desc(p, priv->mode);
1869
1870                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1871        }
1872        tx_q->dirty_tx = entry;
1873
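            /* Report completed packets/bytes to BQL for this TX queue */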
1874        netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1875                                  pkts_compl, bytes_compl);
1876
1877        if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1878                                                                queue))) &&
1879            stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1880
1881                netif_dbg(priv, tx_done, priv->dev,
1882                          "%s: restart transmit\n", __func__);
1883                netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1884        }
1885
1886        if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1887                stmmac_enable_eee_mode(priv);
1888                mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1889        }
1890        netif_tx_unlock(priv->dev);
1891}
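
    /*
     * Editor's note: STMMAC_GET_ENTRY() used above is assumed to advance a
     * ring index with power-of-two wrap-around, roughly:
     *
     *     entry = (entry + 1) & (DMA_TX_SIZE - 1);
     *
     * so dirty_tx chases cur_tx around the TX ring.
     */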
1892
1893static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1894{
1895        priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1896}
1897
1898static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1899{
1900        priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1901}
1902
1903/**
1904 * stmmac_tx_err - to manage the tx error
1905 * @priv: driver private structure
1906 * @chan: channel index
1907 * Description: it cleans the descriptors and restarts the transmission
1908 * in case of transmission errors.
1909 */
1910static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1911{
1912        struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1913        int i;
1914
1915        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1916
1917        stmmac_stop_tx_dma(priv, chan);
1918        dma_free_tx_skbufs(priv, chan);
1919        for (i = 0; i < DMA_TX_SIZE; i++)
1920                if (priv->extend_desc)
1921                        priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1922                                                     priv->mode,
1923                                                     (i == DMA_TX_SIZE - 1));
1924                else
1925                        priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1926                                                     priv->mode,
1927                                                     (i == DMA_TX_SIZE - 1));
1928        tx_q->dirty_tx = 0;
1929        tx_q->cur_tx = 0;
1930        netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1931        stmmac_start_tx_dma(priv, chan);
1932
1933        priv->dev->stats.tx_errors++;
1934        netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1935}
1936
1937/**
1938 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1939 *  @priv: driver private structure
1940 *  @txmode: TX operating mode
1941 *  @rxmode: RX operating mode
1942 *  @chan: channel index
1943 *  Description: it is used for configuring of the DMA operation mode in
1944 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1945 *  mode.
1946 */
1947static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1948                                          u32 rxmode, u32 chan)
1949{
1950        int rxfifosz = priv->plat->rx_fifo_size;
1951
1952        if (rxfifosz == 0)
1953                rxfifosz = priv->dma_cap.rx_fifo_size;
1954
1955        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1956                priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1957                                           rxfifosz);
1958                priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1959        } else {
1960                priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1961                                        rxfifosz);
1962        }
1963}
1964
1965/**
1966 * stmmac_dma_interrupt - DMA ISR
1967 * @priv: driver private structure
1968 * Description: this is the DMA ISR. It is called by the main ISR.
1969 * It calls the dwmac dma routine and schedules the poll method when some
1970 * work can be done.
1971 */
1972static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1973{
1974        u32 tx_channel_count = priv->plat->tx_queues_to_use;
1975        int status;
1976        u32 chan;
1977
1978        for (chan = 0; chan < tx_channel_count; chan++) {
1979                struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1980
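                    /* The NAPI context is hosted by the RX queue that shares
                     * this channel index.
                     */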
1981                status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1982                                                      &priv->xstats, chan);
1983                if (likely((status & handle_rx)) || (status & handle_tx)) {
1984                        if (likely(napi_schedule_prep(&rx_q->napi))) {
1985                                stmmac_disable_dma_irq(priv, chan);
1986                                __napi_schedule(&rx_q->napi);
1987                        }
1988                }
1989
1990                if (unlikely(status & tx_hard_error_bump_tc)) {
1991                        /* Try to bump up the dma threshold on this failure */
1992                        if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1993                            (tc <= 256)) {
1994                                tc += 64;
1995                                if (priv->plat->force_thresh_dma_mode)
1996                                        stmmac_set_dma_operation_mode(priv,
1997                                                                      tc,
1998                                                                      tc,
1999                                                                      chan);
2000                                else
2001                                        stmmac_set_dma_operation_mode(priv,
2002                                                                    tc,
2003                                                                    SF_DMA_MODE,
2004                                                                    chan);
2005                                priv->xstats.threshold = tc;
2006                        }
2007                } else if (unlikely(status == tx_hard_error)) {
2008                        stmmac_tx_err(priv, chan);
2009                }
2010        }
2011}
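
    /*
     * Editorial note on the recovery policy above: on tx_hard_error_bump_tc,
     * if the current threshold is not SF_DMA_MODE and tc is still <= 256,
     * tc is bumped by 64 and reprogrammed through
     * stmmac_set_dma_operation_mode(); a plain tx_hard_error instead restarts
     * the channel via stmmac_tx_err().
     */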
2012
2013/**
2014 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2015 * @priv: driver private structure
2016 * Description: this masks the MMC irq since the counters are managed in SW.
2017 */
2018static void stmmac_mmc_setup(struct stmmac_priv *priv)
2019{
2020        unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2021                            MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2022
2023        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2024                priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2025                priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2026        } else {
2027                priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2028                priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2029        }
2030
2031        dwmac_mmc_intr_all_mask(priv->mmcaddr);
2032
2033        if (priv->dma_cap.rmon) {
2034                dwmac_mmc_ctrl(priv->mmcaddr, mode);
2035                memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2036        } else
2037                netdev_info(priv->dev, "No MAC Management Counters available\n");
2038}
2039
2040/**
2041 * stmmac_selec_desc_mode - select among normal/alternate/extended descriptors
2042 * @priv: driver private structure
2043 * Description: select the Enhanced/Alternate or Normal descriptors.
2044 * In case of Enhanced/Alternate, it checks if the extended descriptors are
2045 * supported by the HW capability register.
2046 */
2047static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2048{
2049        if (priv->plat->enh_desc) {
2050                dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2051
2052                /* GMAC older than 3.50 has no extended descriptors */
2053                if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2054                        dev_info(priv->device, "Enabled extended descriptors\n");
2055                        priv->extend_desc = 1;
2056                } else
2057                        dev_warn(priv->device, "Extended descriptors not supported\n");
2058
2059                priv->hw->desc = &enh_desc_ops;
2060        } else {
2061                dev_info(priv->device, "Normal descriptors\n");
2062                priv->hw->desc = &ndesc_ops;
2063        }
2064}
2065
2066/**
2067 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2068 * @priv: driver private structure
2069 * Description:
2070 *  new GMAC chip generations have a register to indicate the
2071 *  presence of the optional features/functions.
2072 *  This can also be used to override the values passed through the
2073 *  platform, which remain necessary for old MAC10/100 and GMAC chips.
2074 */
2075static int stmmac_get_hw_features(struct stmmac_priv *priv)
2076{
2077        u32 ret = 0;
2078
2079        if (priv->hw->dma->get_hw_feature) {
2080                priv->hw->dma->get_hw_feature(priv->ioaddr,
2081                                              &priv->dma_cap);
2082                ret = 1;
2083        }
2084
2085        return ret;
2086}
2087
2088/**
2089 * stmmac_check_ether_addr - check if the MAC addr is valid
2090 * @priv: driver private structure
2091 * Description:
2092 * it verifies whether the MAC address is valid; in case of failure, it
2093 * generates a random MAC address.
2094 */
2095static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2096{
2097        if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2098                priv->hw->mac->get_umac_addr(priv->hw,
2099                                             priv->dev->dev_addr, 0);
2100                if (!is_valid_ether_addr(priv->dev->dev_addr))
2101                        eth_hw_addr_random(priv->dev);
2102                netdev_info(priv->dev, "device MAC address %pM\n",
2103                            priv->dev->dev_addr);
2104        }
2105}
2106
2107/**
2108 * stmmac_init_dma_engine - DMA init.
2109 * @priv: driver private structure
2110 * Description:
2111 * It inits the DMA invoking the specific MAC/GMAC callback.
2112 * Some DMA parameters can be passed from the platform;
2113 * in case they are not passed, a default is kept for the MAC or GMAC.
2114 */
2115static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2116{
2117        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2118        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2119        struct stmmac_rx_queue *rx_q;
2120        struct stmmac_tx_queue *tx_q;
2121        u32 dummy_dma_rx_phy = 0;
2122        u32 dummy_dma_tx_phy = 0;
2123        u32 chan = 0;
2124        int atds = 0;
2125        int ret = 0;
2126
2127        if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2128                dev_err(priv->device, "Invalid DMA configuration\n");
2129                return -EINVAL;
2130        }
2131
2132        if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2133                atds = 1;
2134
2135        ret = priv->hw->dma->reset(priv->ioaddr);
2136        if (ret) {
2137                dev_err(priv->device, "Failed to reset the dma\n");
2138                return ret;
2139        }
2140
2141        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2142                /* DMA Configuration */
2143                priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2144                                    dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2145
2146                /* DMA RX Channel Configuration */
2147                for (chan = 0; chan < rx_channels_count; chan++) {
2148                        rx_q = &priv->rx_queue[chan];
2149
2150                        priv->hw->dma->init_rx_chan(priv->ioaddr,
2151                                                    priv->plat->dma_cfg,
2152                                                    rx_q->dma_rx_phy, chan);
2153
2154                        rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2155                                    (DMA_RX_SIZE * sizeof(struct dma_desc));
2156                        priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2157                                                       rx_q->rx_tail_addr,
2158                                                       chan);
2159                }
2160
2161                /* DMA TX Channel Configuration */
2162                for (chan = 0; chan < tx_channels_count; chan++) {
2163                        tx_q = &priv->tx_queue[chan];
2164
2165                        priv->hw->dma->init_chan(priv->ioaddr,
2166                                                 priv->plat->dma_cfg,
2167                                                 chan);
2168
2169                        priv->hw->dma->init_tx_chan(priv->ioaddr,
2170                                                    priv->plat->dma_cfg,
2171                                                    tx_q->dma_tx_phy, chan);
2172
2173                        tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2174                                    (DMA_TX_SIZE * sizeof(struct dma_desc));
2175                        priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2176                                                       tx_q->tx_tail_addr,
2177                                                       chan);
2178                }
2179        } else {
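                    /* Older cores have a single DMA channel (chan == 0 here):
                     * program it with the queue 0 descriptor rings.
                     */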
2180                rx_q = &priv->rx_queue[chan];
2181                tx_q = &priv->tx_queue[chan];
2182                priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2183                                    tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2184        }
2185
2186        if (priv->plat->axi && priv->hw->dma->axi)
2187                priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2188
2189        return ret;
2190}
2191
2192/**
2193 * stmmac_tx_timer - mitigation sw timer for tx.
2194 * @data: data pointer
2195 * Description:
2196 * This is the timer handler used to directly invoke stmmac_tx_clean().
2197 */
2198static void stmmac_tx_timer(unsigned long data)
2199{
2200        struct stmmac_priv *priv = (struct stmmac_priv *)data;
2201        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2202        u32 queue;
2203
2204        /* let's scan all the tx queues */
2205        for (queue = 0; queue < tx_queues_count; queue++)
2206                stmmac_tx_clean(priv, queue);
2207}
2208
2209/**
2210 * stmmac_init_tx_coalesce - init tx mitigation options.
2211 * @priv: driver private structure
2212 * Description:
2213 * This inits the transmit coalesce parameters: i.e. timer rate,
2214 * timer handler and default threshold used for enabling the
2215 * interrupt on completion bit.
2216 */
2217static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2218{
2219        priv->tx_coal_frames = STMMAC_TX_FRAMES;
2220        priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2221        init_timer(&priv->txtimer);
2222        priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2223        priv->txtimer.data = (unsigned long)priv;
2224        priv->txtimer.function = stmmac_tx_timer;
2225        add_timer(&priv->txtimer);
2226}
2227
2228static void stmmac_set_rings_length(struct stmmac_priv *priv)
2229{
2230        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2231        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2232        u32 chan;
2233
2234        /* set TX ring length */
2235        if (priv->hw->dma->set_tx_ring_len) {
2236                for (chan = 0; chan < tx_channels_count; chan++)
2237                        priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2238                                                       (DMA_TX_SIZE - 1), chan);
2239        }
2240
2241        /* set RX ring length */
2242        if (priv->hw->dma->set_rx_ring_len) {
2243                for (chan = 0; chan < rx_channels_count; chan++)
2244                        priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2245                                                       (DMA_RX_SIZE - 1), chan);
2246        }
2247}
2248
2249/**
2250 *  stmmac_set_tx_queue_weight - Set TX queue weight
2251 *  @priv: driver private structure
2252 *  Description: It is used for setting the TX queue weights
2253 */
2254static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2255{
2256        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2257        u32 weight;
2258        u32 queue;
2259
2260        for (queue = 0; queue < tx_queues_count; queue++) {
2261                weight = priv->plat->tx_queues_cfg[queue].weight;
2262                priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2263        }
2264}
2265
2266/**
2267 *  stmmac_configure_cbs - Configure CBS in TX queue
2268 *  @priv: driver private structure
2269 *  Description: It is used for configuring CBS in AVB TX queues
2270 */
2271static void stmmac_configure_cbs(struct stmmac_priv *priv)
2272{
2273        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2274        u32 mode_to_use;
2275        u32 queue;
2276
2277        /* queue 0 is reserved for legacy traffic */
2278        for (queue = 1; queue < tx_queues_count; queue++) {
2279                mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
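                    /* CBS only applies to AVB queues; skip DCB queues */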
2280                if (mode_to_use == MTL_QUEUE_DCB)
2281                        continue;
2282
2283                priv->hw->mac->config_cbs(priv->hw,
2284                                priv->plat->tx_queues_cfg[queue].send_slope,
2285                                priv->plat->tx_queues_cfg[queue].idle_slope,
2286                                priv->plat->tx_queues_cfg[queue].high_credit,
2287                                priv->plat->tx_queues_cfg[queue].low_credit,
2288                                queue);
2289        }
2290}
2291
2292/**
2293 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2294 *  @priv: driver private structure
2295 *  Description: It is used for mapping RX queues to RX dma channels
2296 */
2297static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2298{
2299        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2300        u32 queue;
2301        u32 chan;
2302
2303        for (queue = 0; queue < rx_queues_count; queue++) {
2304                chan = priv->plat->rx_queues_cfg[queue].chan;
2305                priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2306        }
2307}
2308
2309/**
2310 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2311 *  @priv: driver private structure
2312 *  Description: It is used for configuring the RX Queue Priority
2313 */
2314static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2315{
2316        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2317        u32 queue;
2318        u32 prio;
2319
2320        for (queue = 0; queue < rx_queues_count; queue++) {
2321                if (!priv->plat->rx_queues_cfg[queue].use_prio)
2322                        continue;
2323
2324                prio = priv->plat->rx_queues_cfg[queue].prio;
2325                priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2326        }
2327}
2328
2329/**
2330 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2331 *  @priv: driver private structure
2332 *  Description: It is used for configuring the TX Queue Priority
2333 */
2334static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2335{
2336        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2337        u32 queue;
2338        u32 prio;
2339
2340        for (queue = 0; queue < tx_queues_count; queue++) {
2341                if (!priv->plat->tx_queues_cfg[queue].use_prio)
2342                        continue;
2343
2344                prio = priv->plat->tx_queues_cfg[queue].prio;
2345                priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2346        }
2347}
2348
2349/**
2350 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2351 *  @priv: driver private structure
2352 *  Description: It is used for configuring the RX queue routing
2353 */
2354static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2355{
2356        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2357        u32 queue;
2358        u8 packet;
2359
2360        for (queue = 0; queue < rx_queues_count; queue++) {
2361                /* no specific packet type routing specified for the queue */
2362                if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2363                        continue;
2364
2365                packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2366                priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2367        }
2368}
2369
2370/**
2371 *  stmmac_mtl_configuration - Configure MTL
2372 *  @priv: driver private structure
2373 *  Description: It is used for configuring the MTL
2374 */
2375static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2376{
2377        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2378        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2379
2380        if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2381                stmmac_set_tx_queue_weight(priv);
2382
2383        /* Configure MTL RX algorithms */
2384        if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2385                priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2386                                                priv->plat->rx_sched_algorithm);
2387
2388        /* Configure MTL TX algorithms */
2389        if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2390                priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2391                                                priv->plat->tx_sched_algorithm);
2392
2393        /* Configure CBS in AVB TX queues */
2394        if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2395                stmmac_configure_cbs(priv);
2396
2397        /* Map RX MTL to DMA channels */
2398        if (priv->hw->mac->map_mtl_to_dma)
2399                stmmac_rx_queue_dma_chan_map(priv);
2400
2401        /* Enable MAC RX Queues */
2402        if (priv->hw->mac->rx_queue_enable)
2403                stmmac_mac_enable_rx_queues(priv);
2404
2405        /* Set RX priorities */
2406        if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2407                stmmac_mac_config_rx_queues_prio(priv);
2408
2409        /* Set TX priorities */
2410        if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2411                stmmac_mac_config_tx_queues_prio(priv);
2412
2413        /* Set RX routing */
2414        if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2415                stmmac_mac_config_rx_queues_routing(priv);
2416}
2417
2418/**
2419 * stmmac_hw_setup - setup mac in a usable state.
2420 *  @dev : pointer to the device structure.
2421 *  Description:
2422 *  this is the main function to setup the HW in a usable state: the
2423 *  dma engine is reset, the core registers are configured (e.g. AXI,
2424 *  Checksum features, timers) and the DMA is made ready to start
2425 *  receiving and transmitting.
2426 *  Return value:
2427 *  0 on success and an appropriate (-)ve integer as defined in errno.h
2428 *  file on failure.
2429 */
2430static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2431{
2432        struct stmmac_priv *priv = netdev_priv(dev);
2433        u32 rx_cnt = priv->plat->rx_queues_to_use;
2434        u32 tx_cnt = priv->plat->tx_queues_to_use;
2435        u32 chan;
2436        int ret;
2437
2438        /* DMA initialization and SW reset */
2439        ret = stmmac_init_dma_engine(priv);
2440        if (ret < 0) {
2441                netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2442                           __func__);
2443                return ret;
2444        }
2445
2446        /* Copy the MAC addr into the HW  */
2447        priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2448
2449        /* PS and related bits will be programmed according to the speed */
2450        if (priv->hw->pcs) {
2451                int speed = priv->plat->mac_port_sel_speed;
2452
2453                if ((speed == SPEED_10) || (speed == SPEED_100) ||
2454                    (speed == SPEED_1000)) {
2455                        priv->hw->ps = speed;
2456                } else {
2457                        dev_warn(priv->device, "invalid port speed\n");
2458                        priv->hw->ps = 0;
2459                }
2460        }
2461
2462        /* Initialize the MAC Core */
2463        priv->hw->mac->core_init(priv->hw, dev->mtu);
2464
2465        /* Initialize MTL*/
2466        if (priv->synopsys_id >= DWMAC_CORE_4_00)
2467                stmmac_mtl_configuration(priv);
2468
2469        ret = priv->hw->mac->rx_ipc(priv->hw);
2470        if (!ret) {
2471                netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2472                priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2473                priv->hw->rx_csum = 0;
2474        }
2475
2476        /* Enable the MAC Rx/Tx */
2477        priv->hw->mac->set_mac(priv->ioaddr, true);
2478
2479        /* Set the HW DMA mode and the COE */
2480        stmmac_dma_operation_mode(priv);
2481
2482        stmmac_mmc_setup(priv);
2483
2484        if (init_ptp) {
2485                ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2486                if (ret < 0)
2487                        netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2488
2489                ret = stmmac_init_ptp(priv);
2490                if (ret == -EOPNOTSUPP)
2491                        netdev_warn(priv->dev, "PTP not supported by HW\n");
2492                else if (ret)
2493                        netdev_warn(priv->dev, "PTP init failed\n");
2494        }
2495
2496#ifdef CONFIG_DEBUG_FS
2497        ret = stmmac_init_fs(dev);
2498        if (ret < 0)
2499                netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2500                            __func__);
2501#endif
2502        /* Start the ball rolling... */
2503        stmmac_start_all_dma(priv);
2504
2505        priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2506
2507        if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2508                priv->rx_riwt = MAX_DMA_RIWT;
2509                priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2510        }
2511
2512        if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2513                priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2514
2515        /* set TX and RX rings length */
2516        stmmac_set_rings_length(priv);
2517
2518        /* Enable TSO */
2519        if (priv->tso) {
2520                for (chan = 0; chan < tx_cnt; chan++)
2521                        priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2522        }
2523
2524        return 0;
2525}
2526
2527static void stmmac_hw_teardown(struct net_device *dev)
2528{
2529        struct stmmac_priv *priv = netdev_priv(dev);
2530
2531        clk_disable_unprepare(priv->plat->clk_ptp_ref);
2532}
2533
2534/**
2535 *  stmmac_open - open entry point of the driver
2536 *  @dev : pointer to the device structure.
2537 *  Description:
2538 *  This function is the open entry point of the driver.
2539 *  Return value:
2540 *  0 on success and an appropriate (-)ve integer as defined in errno.h
2541 *  file on failure.
2542 */
2543static int stmmac_open(struct net_device *dev)
2544{
2545        struct stmmac_priv *priv = netdev_priv(dev);
2546        int ret;
2547
2548        stmmac_check_ether_addr(priv);
2549
2550        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2551            priv->hw->pcs != STMMAC_PCS_TBI &&
2552            priv->hw->pcs != STMMAC_PCS_RTBI) {
2553                ret = stmmac_init_phy(dev);
2554                if (ret) {
2555                        netdev_err(priv->dev,
2556                                   "%s: Cannot attach to PHY (error: %d)\n",
2557                                   __func__, ret);
2558                        return ret;
2559                }
2560        }
2561
2562        /* Extra statistics */
2563        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2564        priv->xstats.threshold = tc;
2565
2566        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2567        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2568
2569        ret = alloc_dma_desc_resources(priv);
2570        if (ret < 0) {
2571                netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2572                           __func__);
2573                goto dma_desc_error;
2574        }
2575
2576        ret = init_dma_desc_rings(dev, GFP_KERNEL);
2577        if (ret < 0) {
2578                netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2579                           __func__);
2580                goto init_error;
2581        }
2582
2583        ret = stmmac_hw_setup(dev, true);
2584        if (ret < 0) {
2585                netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2586                goto init_error;
2587        }
2588
2589        stmmac_init_tx_coalesce(priv);
2590
2591        if (dev->phydev)
2592                phy_start(dev->phydev);
2593
2594        /* Request the IRQ lines */
2595        ret = request_irq(dev->irq, stmmac_interrupt,
2596                          IRQF_SHARED, dev->name, dev);
2597        if (unlikely(ret < 0)) {
2598                netdev_err(priv->dev,
2599                           "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2600                           __func__, dev->irq, ret);
2601                goto irq_error;
2602        }
2603
2604        /* Request the Wake IRQ in case another line is used for WoL */
2605        if (priv->wol_irq != dev->irq) {
2606                ret = request_irq(priv->wol_irq, stmmac_interrupt,
2607                                  IRQF_SHARED, dev->name, dev);
2608                if (unlikely(ret < 0)) {
2609                        netdev_err(priv->dev,
2610                                   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2611                                   __func__, priv->wol_irq, ret);
2612                        goto wolirq_error;
2613                }
2614        }
2615
2616        /* Request the LPI IRQ in case a separate line is used */
2617        if (priv->lpi_irq > 0) {
2618                ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2619                                  dev->name, dev);
2620                if (unlikely(ret < 0)) {
2621                        netdev_err(priv->dev,
2622                                   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2623                                   __func__, priv->lpi_irq, ret);
2624                        goto lpiirq_error;
2625                }
2626        }
2627
2628        stmmac_enable_all_queues(priv);
2629        stmmac_start_all_queues(priv);
2630
2631        return 0;
2632
2633lpiirq_error:
2634        if (priv->wol_irq != dev->irq)
2635                free_irq(priv->wol_irq, dev);
2636wolirq_error:
2637        free_irq(dev->irq, dev);
2638irq_error:
2639        if (dev->phydev)
2640                phy_stop(dev->phydev);
2641
2642        del_timer_sync(&priv->txtimer);
2643        stmmac_hw_teardown(dev);
2644init_error:
2645        free_dma_desc_resources(priv);
2646dma_desc_error:
2647        if (dev->phydev)
2648                phy_disconnect(dev->phydev);
2649
2650        return ret;
2651}
2652
2653/**
2654 *  stmmac_release - close entry point of the driver
2655 *  @dev : device pointer.
2656 *  Description:
2657 *  This is the stop entry point of the driver.
2658 */
2659static int stmmac_release(struct net_device *dev)
2660{
2661        struct stmmac_priv *priv = netdev_priv(dev);
2662
2663        if (priv->eee_enabled)
2664                del_timer_sync(&priv->eee_ctrl_timer);
2665
2666        /* Stop and disconnect the PHY */
2667        if (dev->phydev) {
2668                phy_stop(dev->phydev);
2669                phy_disconnect(dev->phydev);
2670        }
2671
2672        stmmac_stop_all_queues(priv);
2673
2674        stmmac_disable_all_queues(priv);
2675
2676        del_timer_sync(&priv->txtimer);
2677
2678        /* Free the IRQ lines */
2679        free_irq(dev->irq, dev);
2680        if (priv->wol_irq != dev->irq)
2681                free_irq(priv->wol_irq, dev);
2682        if (priv->lpi_irq > 0)
2683                free_irq(priv->lpi_irq, dev);
2684
2685        /* Stop TX/RX DMA and clear the descriptors */
2686        stmmac_stop_all_dma(priv);
2687
2688        /* Release and free the Rx/Tx resources */
2689        free_dma_desc_resources(priv);
2690
2691        /* Disable the MAC Rx/Tx */
2692        priv->hw->mac->set_mac(priv->ioaddr, false);
2693
2694        netif_carrier_off(dev);
2695
2696#ifdef CONFIG_DEBUG_FS
2697        stmmac_exit_fs(dev);
2698#endif
2699
2700        stmmac_release_ptp(priv);
2701
2702        return 0;
2703}
2704
2705/**
2706 *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
2707 *  @priv: driver private structure
2708 *  @des: buffer start address
2709 *  @total_len: total length to fill in descriptors
2710 *  @last_segment: condition for the last descriptor
2711 *  @queue: TX queue index
2712 *  Description:
2713 *  This function fills descriptors and requests new descriptors according
2714 *  to the buffer length to fill.
2715 */
2716static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2717                                 int total_len, bool last_segment, u32 queue)
2718{
2719        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2720        struct dma_desc *desc;
2721        u32 buff_size;
2722        int tmp_len;
2723
2724        tmp_len = total_len;
2725
2726        while (tmp_len > 0) {
2727                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2728                desc = tx_q->dma_tx + tx_q->cur_tx;
2729
2730                desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2731                buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2732                            TSO_MAX_BUFF_SIZE : tmp_len;
2733
2734                priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2735                        0, 1,
2736                        (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2737                        0, 0);
2738
2739                tmp_len -= TSO_MAX_BUFF_SIZE;
2740        }
2741}
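
    /*
     * Editorial worked example: a payload of 2.5 * TSO_MAX_BUFF_SIZE bytes
     * handed to stmmac_tso_allocator() is spread over three descriptors
     * (two of TSO_MAX_BUFF_SIZE bytes plus a final half-size one); only the
     * final descriptor may carry the last_segment bit, i.e. when
     * tmp_len <= TSO_MAX_BUFF_SIZE.
     */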
2742
2743/**
2744 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2745 *  @skb : the socket buffer
2746 *  @dev : device pointer
2747 *  Description: this is the transmit function that is called on TSO frames
2748 *  (support available on GMAC4 and newer chips).
2749 *  The diagram below shows the ring programming in case of TSO frames:
2750 *
2751 *  First Descriptor
2752 *   --------
2753 *   | DES0 |---> buffer1 = L2/L3/L4 header
2754 *   | DES1 |---> TCP Payload (can continue on next descr...)
2755 *   | DES2 |---> buffer 1 and 2 len
2756 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2757 *   --------
2758 *      |
2759 *     ...
2760 *      |
2761 *   --------
2762 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2763 *   | DES1 | --|
2764 *   | DES2 | --> buffer 1 and 2 len
2765 *   | DES3 |
2766 *   --------
2767 *
2768 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only set when it changes.
2769 */
2770static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2771{
2772        struct dma_desc *desc, *first, *mss_desc = NULL;
2773        struct stmmac_priv *priv = netdev_priv(dev);
2774        int nfrags = skb_shinfo(skb)->nr_frags;
2775        u32 queue = skb_get_queue_mapping(skb);
2776        unsigned int first_entry, des;
2777        struct stmmac_tx_queue *tx_q;
2778        int tmp_pay_len = 0;
2779        u32 pay_len, mss;
2780        u8 proto_hdr_len;
2781        int i;
2782
2783        tx_q = &priv->tx_queue[queue];
2784
2785        /* Compute header lengths */
2786        proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2787
2788        /* Desc availability based on threshold should be safe enough */
2789        if (unlikely(stmmac_tx_avail(priv, queue) <
2790                (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2791                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2792                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2793                                                                queue));
2794                        /* This is a hard error, log it. */
2795                        netdev_err(priv->dev,
2796                                   "%s: Tx Ring full when queue awake\n",
2797                                   __func__);
2798                }
2799                return NETDEV_TX_BUSY;
2800        }
2801
2802        pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2803
2804        mss = skb_shinfo(skb)->gso_size;
2805
2806        /* set new MSS value if needed */
2807        if (mss != priv->mss) {
2808                mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2809                priv->hw->desc->set_mss(mss_desc, mss);
2810                priv->mss = mss;
2811                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2812        }
2813
2814        if (netif_msg_tx_queued(priv)) {
2815                pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2816                        __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2817                pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2818                        skb->data_len);
2819        }
2820
2821        first_entry = tx_q->cur_tx;
2822
2823        desc = tx_q->dma_tx + first_entry;
2824        first = desc;
2825
2826        /* first descriptor: fill Headers on Buf1 */
2827        des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2828                             DMA_TO_DEVICE);
2829        if (dma_mapping_error(priv->device, des))
2830                goto dma_map_err;
2831
2832        tx_q->tx_skbuff_dma[first_entry].buf = des;
2833        tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2834
2835        first->des0 = cpu_to_le32(des);
2836
2837        /* Fill start of payload in buff2 of first descriptor */
2838        if (pay_len)
2839                first->des1 = cpu_to_le32(des + proto_hdr_len);
2840
2841        /* If needed take extra descriptors to fill the remaining payload */
2842        tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2843
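            /* tmp_pay_len is what is left of the linear payload after the
             * first descriptor; if it is not positive, the call below
             * allocates no extra descriptors.
             */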
2844        stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2845
2846        /* Prepare fragments */
2847        for (i = 0; i < nfrags; i++) {
2848                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2849
2850                des = skb_frag_dma_map(priv->device, frag, 0,
2851                                       skb_frag_size(frag),
2852                                       DMA_TO_DEVICE);
2853                if (dma_mapping_error(priv->device, des))
2854                        goto dma_map_err;
2855
2856                stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2857                                     (i == nfrags - 1), queue);
2858
2859                tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2860                tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2861                tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2862                tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2863        }
2864
2865        tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2866
2867        /* Only the last descriptor gets to point to the skb. */
2868        tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2869
2870        /* We've used all descriptors we need for this skb, however,
2871         * advance cur_tx so that it references a fresh descriptor.
2872         * ndo_start_xmit will fill this descriptor the next time it's
2873         * called and stmmac_tx_clean may clean up to this descriptor.
2874         */
2875        tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2876
2877        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2878                netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
2879                          __func__);
2880                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2881        }
2882
2883        dev->stats.tx_bytes += skb->len;
2884        priv->xstats.tx_tso_frames++;
2885        priv->xstats.tx_tso_nfrags += nfrags;
2886
2887        /* Manage tx mitigation */
2888        priv->tx_count_frames += nfrags + 1;
2889        if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2890                mod_timer(&priv->txtimer,
2891                          STMMAC_COAL_TIMER(priv->tx_coal_timer));
2892        } else {
2893                priv->tx_count_frames = 0;
2894                priv->hw->desc->set_tx_ic(desc);
2895                priv->xstats.tx_set_ic_bit++;
2896        }
2897
2898        if (!priv->hwts_tx_en)
2899                skb_tx_timestamp(skb);
2900
2901        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2902                     priv->hwts_tx_en)) {
2903                /* declare that the device is doing timestamping */
2904                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2905                priv->hw->desc->enable_tx_timestamp(first);
2906        }
2907
2908        /* Complete the first descriptor before granting the DMA */
2909        priv->hw->desc->prepare_tso_tx_desc(first, 1,
2910                        proto_hdr_len,
2911                        pay_len,
2912                        1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2913                        tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2914
2915        /* If a context descriptor was used to change the MSS */
2916        if (mss_desc)
2917                priv->hw->desc->set_tx_owner(mss_desc);
2918
2919        /* The own bit must be the last setting done when preparing the
2920         * descriptor, and a barrier is needed to make sure that
2921         * everything is coherent before granting ownership to the DMA engine.
2922         */
2923        dma_wmb();
2924
2925        if (netif_msg_pktdata(priv)) {
2926                pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2927                        __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2928                        tx_q->cur_tx, first, nfrags);
2929
2930                priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2931                                             0);
2932
2933                pr_info(">>> frame to be transmitted: ");
2934                print_pkt(skb->data, skb_headlen(skb));
2935        }
2936
2937        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2938
2939        priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2940                                       queue);
2941
2942        return NETDEV_TX_OK;
2943
2944dma_map_err:
2945        dev_err(priv->device, "Tx dma map failed\n");
2946        dev_kfree_skb(skb);
2947        priv->dev->stats.tx_dropped++;
2948        return NETDEV_TX_OK;
2949}
2950
2951/**
2952 *  stmmac_xmit - Tx entry point of the driver
2953 *  @skb : the socket buffer
2954 *  @dev : device pointer
2955 *  Description: this is the tx entry point of the driver.
2956 *  It programs the descriptors (chain or ring mode) and supports oversized
2957 *  frames and the SG feature.
2958 */
2959static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2960{
2961        struct stmmac_priv *priv = netdev_priv(dev);
2962        unsigned int nopaged_len = skb_headlen(skb);
2963        int i, csum_insertion = 0, is_jumbo = 0;
2964        u32 queue = skb_get_queue_mapping(skb);
2965        int nfrags = skb_shinfo(skb)->nr_frags;
2966        int entry;
2967        unsigned int first_entry;
2968        struct dma_desc *desc, *first;
2969        struct stmmac_tx_queue *tx_q;
2970        unsigned int enh_desc;
2971        unsigned int des;
2972
2973        tx_q = &priv->tx_queue[queue];
2974
2975        /* Manage oversized TCP frames for GMAC4 devices */
2976        if (skb_is_gso(skb) && priv->tso) {
2977                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2978                        return stmmac_tso_xmit(skb, dev);
2979        }
2980
2981        if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2982                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2983                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2984                                                                queue));
2985                        /* This is a hard error, log it. */
2986                        netdev_err(priv->dev,
2987                                   "%s: Tx Ring full when queue awake\n",
2988                                   __func__);
2989                }
2990                return NETDEV_TX_BUSY;
2991        }
2992
2993        if (priv->tx_path_in_lpi_mode)
2994                stmmac_disable_eee_mode(priv);
2995
2996        entry = tx_q->cur_tx;
2997        first_entry = entry;
2998
2999        csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3000
3001        if (likely(priv->extend_desc))
3002                desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3003        else
3004                desc = tx_q->dma_tx + entry;
3005
3006        first = desc;
3007
3008        enh_desc = priv->plat->enh_desc;
3009        /* To program the descriptors according to the size of the frame */
3010        if (enh_desc)
3011                is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3012
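            /* On cores older than 4.00, jumbo frames are handed to the
             * mode-specific jumbo_frm() helper, which programs the needed
             * descriptors; a negative return means a DMA mapping failed.
             */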
3013        if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3014                                         DWMAC_CORE_4_00)) {
3015                entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3016                if (unlikely(entry < 0))
3017                        goto dma_map_err;
3018        }
3019
3020        for (i = 0; i < nfrags; i++) {
3021                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3022                int len = skb_frag_size(frag);
3023                bool last_segment = (i == (nfrags - 1));
3024
3025                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3026
3027                if (likely(priv->extend_desc))
3028                        desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3029                else
3030                        desc = tx_q->dma_tx + entry;
3031
3032                des = skb_frag_dma_map(priv->device, frag, 0, len,
3033                                       DMA_TO_DEVICE);
3034                if (dma_mapping_error(priv->device, des))
3035                        goto dma_map_err; /* should reuse desc w/o issues */
3036
3037                tx_q->tx_skbuff[entry] = NULL;
3038
3039                tx_q->tx_skbuff_dma[entry].buf = des;
3040                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3041                        desc->des0 = cpu_to_le32(des);
3042                else
3043                        desc->des2 = cpu_to_le32(des);
3044
3045                tx_q->tx_skbuff_dma[entry].map_as_page = true;
3046                tx_q->tx_skbuff_dma[entry].len = len;
3047                tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3048
3049                /* Prepare the descriptor and set the own bit too */
3050                priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3051                                                priv->mode, 1, last_segment,
3052                                                skb->len);
3053        }
3054
3055        /* Only the last descriptor gets to point to the skb. */
3056        tx_q->tx_skbuff[entry] = skb;
3057
3058        /* We've used all descriptors we need for this skb, however,
3059         * advance cur_tx so that it references a fresh descriptor.
3060         * ndo_start_xmit will fill this descriptor the next time it's
3061         * called and stmmac_tx_clean may clean up to this descriptor.
3062         */
3063        entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3064        tx_q->cur_tx = entry;
3065
3066        if (netif_msg_pktdata(priv)) {
3067                void *tx_head;
3068
3069                netdev_dbg(priv->dev,
3070                           "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3071                           __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3072                           entry, first, nfrags);
3073
3074                if (priv->extend_desc)
3075                        tx_head = (void *)tx_q->dma_etx;
3076                else
3077                        tx_head = (void *)tx_q->dma_tx;
3078
3079                priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3080
3081                netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3082                print_pkt(skb->data, skb->len);
3083        }
3084
3085        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3086                netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
3087                          __func__);
3088                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3089        }
3090
3091        dev->stats.tx_bytes += skb->len;
3092
3093        /* According to the coalesce parameter, the IC bit for the latest
3094         * segment is cleared and the timer is re-started to clean the tx status
3095         * later. This approach takes care of the fragments: desc points to the
3096         * first element in the case of no SG.
3097         */
3098        priv->tx_count_frames += nfrags + 1;
3099        if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3100                mod_timer(&priv->txtimer,
3101                          STMMAC_COAL_TIMER(priv->tx_coal_timer));
3102        } else {
3103                priv->tx_count_frames = 0;
3104                priv->hw->desc->set_tx_ic(desc);
3105                priv->xstats.tx_set_ic_bit++;
3106        }
3107
3108        if (!priv->hwts_tx_en)
3109                skb_tx_timestamp(skb);
3110
3111        /* Ready to fill the first descriptor and set the OWN bit w/o any
3112         * problems because all the descriptors are actually ready to be
3113         * passed to the DMA engine.
3114         */
3115        if (likely(!is_jumbo)) {
3116                bool last_segment = (nfrags == 0);
3117
3118                des = dma_map_single(priv->device, skb->data,
3119                                     nopaged_len, DMA_TO_DEVICE);
3120                if (dma_mapping_error(priv->device, des))
3121                        goto dma_map_err;
3122
3123                tx_q->tx_skbuff_dma[first_entry].buf = des;
3124                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3125                        first->des0 = cpu_to_le32(des);
3126                else
3127                        first->des2 = cpu_to_le32(des);
3128
3129                tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3130                tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3131
3132                if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3133                             priv->hwts_tx_en)) {
3134                        /* declare that the device is doing timestamping */
3135                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3136                        priv->hw->desc->enable_tx_timestamp(first);
3137                }
3138
3139                /* Prepare the first descriptor setting the OWN bit too */
3140                priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3141                                                csum_insertion, priv->mode, 1,
3142                                                last_segment, skb->len);
3143
3144                /* The own bit must be the last setting done when preparing the
3145                 * descriptor, and a barrier is needed to make sure that
3146                 * everything is coherent before granting ownership to the DMA engine.
3147                 */
3148                dma_wmb();
3149        }
3150
3151        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3152
3153        if (priv->synopsys_id < DWMAC_CORE_4_00)
3154                priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3155        else
3156                priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3157                                               queue);
3158
3159        return NETDEV_TX_OK;
3160
3161dma_map_err:
3162        netdev_err(priv->dev, "Tx DMA map failed\n");
3163        dev_kfree_skb(skb);
3164        priv->dev->stats.tx_dropped++;
3165        return NETDEV_TX_OK;
3166}
3167
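    /* If the VLAN tag is still present in the received frame, strip the
     * 802.1Q header from the packet data and record the VLAN ID via the
     * hwaccel helper so the stack treats the frame as already untagged.
     */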
3168static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3169{
3170        struct ethhdr *ehdr;
3171        u16 vlanid;
3172
3173        if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3174            NETIF_F_HW_VLAN_CTAG_RX &&
3175            !__vlan_get_tag(skb, &vlanid)) {
3176                /* pop the vlan tag */
3177                ehdr = (struct ethhdr *)skb->data;
3178                memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3179                skb_pull(skb, VLAN_HLEN);
3180                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3181        }
3182}
3183
3184
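    /* Returns 1 once rx_zeroc_thresh has reached STMMAC_RX_THRESH (e.g. after
     * an skb allocation failure in stmmac_rx_refill()), in which case
     * stmmac_rx() falls back to copying frames instead of using zero-copy.
     */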
3185static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3186{
3187        if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3188                return 0;
3189
3190        return 1;
3191}
3192
3193/**
3194 * stmmac_rx_refill - refill used skb preallocated buffers
3195 * @priv: driver private structure
3196 * @queue: RX queue index
3197 * Description: this reallocates the skbs for the reception process,
3198 * which is based on zero-copy.
3199 */
3200static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3201{
3202        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3203        int dirty = stmmac_rx_dirty(priv, queue);
3204        unsigned int entry = rx_q->dirty_rx;
3205
3206        int bfsize = priv->dma_buf_sz;
3207
3208        while (dirty-- > 0) {
3209                struct dma_desc *p;
3210
3211                if (priv->extend_desc)
3212                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
3213                else
3214                        p = rx_q->dma_rx + entry;
3215
3216                if (likely(!rx_q->rx_skbuff[entry])) {
3217                        struct sk_buff *skb;
3218
3219                        skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3220                        if (unlikely(!skb)) {
3221                                /* so for a while no zero-copy! */
3222                                rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3223                                if (unlikely(net_ratelimit()))
3224                                        dev_err(priv->device,
3225                                                "fail to alloc skb entry %d\n",
3226                                                entry);
3227                                break;
3228                        }
3229
3230                        rx_q->rx_skbuff[entry] = skb;
3231                        rx_q->rx_skbuff_dma[entry] =
3232                            dma_map_single(priv->device, skb->data, bfsize,
3233                                           DMA_FROM_DEVICE);
3234                        if (dma_mapping_error(priv->device,
3235                                              rx_q->rx_skbuff_dma[entry])) {
3236                                netdev_err(priv->dev, "Rx DMA map failed\n");
3237                                dev_kfree_skb(skb);
3238                                break;
3239                        }
3240
3241                        if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3242                                p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3243                                p->des1 = 0;
3244                        } else {
3245                                p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3246                        }
3247                        if (priv->hw->mode->refill_desc3)
3248                                priv->hw->mode->refill_desc3(rx_q, p);
3249
3250                        if (rx_q->rx_zeroc_thresh > 0)
3251                                rx_q->rx_zeroc_thresh--;
3252
3253                        netif_dbg(priv, rx_status, priv->dev,
3254                                  "refill entry #%d\n", entry);
3255                }
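                /* Make sure the new buffer address is written out before the
                 * descriptor is handed back to the DMA below.
                 */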
3256                dma_wmb();
3257
3258                if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3259                        priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3260                else
3261                        priv->hw->desc->set_rx_owner(p);
3262
3263                dma_wmb();
3264
3265                entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3266        }
3267        rx_q->dirty_rx = entry;
3268}
3269
3270/**
3271 * stmmac_rx - manage the receive process
3272 * @priv: driver private structure
3273 * @limit: napi budget
3274 * @queue: RX queue index.
3275 * Description: this is the function called by the napi poll method.
3276 * It gets all the frames inside the ring.
3277 */
3278static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3279{
3280        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3281        unsigned int entry = rx_q->cur_rx;
3282        int coe = priv->hw->rx_csum;
3283        unsigned int next_entry;
3284        unsigned int count = 0;
3285
3286        if (netif_msg_rx_status(priv)) {
3287                void *rx_head;
3288
3289                netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3290                if (priv->extend_desc)
3291                        rx_head = (void *)rx_q->dma_erx;
3292                else
3293                        rx_head = (void *)rx_q->dma_rx;
3294
3295                priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3296        }
3297        while (count < limit) {
3298                int status;
3299                struct dma_desc *p;
3300                struct dma_desc *np;
3301
3302                if (priv->extend_desc)
3303                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
3304                else
3305                        p = rx_q->dma_rx + entry;
3306
3307                /* read the status of the incoming frame */
3308                status = priv->hw->desc->rx_status(&priv->dev->stats,
3309                                                   &priv->xstats, p);
3310                /* check if it is still owned by the DMA, otherwise go ahead */
3311                if (unlikely(status & dma_own))
3312                        break;
3313
3314                count++;
3315
3316                rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3317                next_entry = rx_q->cur_rx;
3318
3319                if (priv->extend_desc)
3320                        np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3321                else
3322                        np = rx_q->dma_rx + next_entry;
3323
3324                prefetch(np);
3325
3326                if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3327                        priv->hw->desc->rx_extended_status(&priv->dev->stats,
3328                                                           &priv->xstats,
3329                                                           rx_q->dma_erx +
3330                                                           entry);
3331                if (unlikely(status == discard_frame)) {
3332                        priv->dev->stats.rx_errors++;
3333                        if (priv->hwts_rx_en && !priv->extend_desc) {
3334                                /* DESC2 & DESC3 will be overwritten by device
3335                                 * with timestamp value, hence reinitialize
3336                                 * them in stmmac_rx_refill() function so that
3337                                 * device can reuse it.
3338                                 */
3339                                rx_q->rx_skbuff[entry] = NULL;
3340                                dma_unmap_single(priv->device,
3341                                                 rx_q->rx_skbuff_dma[entry],
3342                                                 priv->dma_buf_sz,
3343                                                 DMA_FROM_DEVICE);
3344                        }
3345                } else {
3346                        struct sk_buff *skb;
3347                        int frame_len;
3348                        unsigned int des;
3349
3350                        if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3351                                des = le32_to_cpu(p->des0);
3352                        else
3353                                des = le32_to_cpu(p->des2);
3354
3355                        frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3356
3357                        /*  If frame length is greater than skb buffer size
3358                         *  (preallocated during init) then the packet is
3359                         *  ignored
3360                         */
3361                        if (frame_len > priv->dma_buf_sz) {
3362                                netdev_err(priv->dev,
3363                                           "len %d larger than size (%d)\n",
3364                                           frame_len, priv->dma_buf_sz);
3365                                priv->dev->stats.rx_length_errors++;
3366                                break;
3367                        }
3368
3369                        /* When ACS is set, the GMAC core strips PAD/FCS only for IEEE
3370                         * 802.3 Type frames (LLC/LLC-SNAP); otherwise the FCS is removed here.
3371                         */
3372                        if (unlikely(status != llc_snap))
3373                                frame_len -= ETH_FCS_LEN;
3374
3375                        if (netif_msg_rx_status(priv)) {
3376                                netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3377                                           p, entry, des);
3378                                if (frame_len > ETH_FRAME_LEN)
3379                                        netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3380                                                   frame_len, status);
3381                        }
3382
3383                        /* Zero-copy is always used, whatever the frame size,
3384                         * in the case of GMAC4 because the used descriptors
3385                         * always need to be refilled.
3386                         */
3387                        if (unlikely(!priv->plat->has_gmac4 &&
3388                                     ((frame_len < priv->rx_copybreak) ||
3389                                     stmmac_rx_threshold_count(rx_q)))) {
3390                                skb = netdev_alloc_skb_ip_align(priv->dev,
3391                                                                frame_len);
3392                                if (unlikely(!skb)) {
3393                                        if (net_ratelimit())
3394                                                dev_warn(priv->device,
3395                                                         "packet dropped\n");
3396                                        priv->dev->stats.rx_dropped++;
3397                                        break;
3398                                }
3399
3400                                dma_sync_single_for_cpu(priv->device,
3401                                                        rx_q->rx_skbuff_dma
3402                                                        [entry], frame_len,
3403                                                        DMA_FROM_DEVICE);
3404                                skb_copy_to_linear_data(skb,
3405                                                        rx_q->
3406                                                        rx_skbuff[entry]->data,
3407                                                        frame_len);
3408
3409                                skb_put(skb, frame_len);
3410                                dma_sync_single_for_device(priv->device,
3411                                                           rx_q->rx_skbuff_dma
3412                                                           [entry], frame_len,
3413                                                           DMA_FROM_DEVICE);
3414                        } else {
3415                                skb = rx_q->rx_skbuff[entry];
3416                                if (unlikely(!skb)) {
3417                                        netdev_err(priv->dev,
3418                                                   "%s: Inconsistent Rx chain\n",
3419                                                   priv->dev->name);
3420                                        priv->dev->stats.rx_dropped++;
3421                                        break;
3422                                }
3423                                prefetch(skb->data - NET_IP_ALIGN);
3424                                rx_q->rx_skbuff[entry] = NULL;
3425                                rx_q->rx_zeroc_thresh++;
3426
3427                                skb_put(skb, frame_len);
3428                                dma_unmap_single(priv->device,
3429                                                 rx_q->rx_skbuff_dma[entry],
3430                                                 priv->dma_buf_sz,
3431                                                 DMA_FROM_DEVICE);
3432                        }
3433
3434                        if (netif_msg_pktdata(priv)) {
3435                                netdev_dbg(priv->dev, "frame received (%dbytes)",
3436                                           frame_len);
3437                                print_pkt(skb->data, frame_len);
3438                        }
3439
3440                        stmmac_get_rx_hwtstamp(priv, p, np, skb);
3441
3442                        stmmac_rx_vlan(priv->dev, skb);
3443
3444                        skb->protocol = eth_type_trans(skb, priv->dev);
3445
3446                        if (unlikely(!coe))
3447                                skb_checksum_none_assert(skb);
3448                        else
3449                                skb->ip_summed = CHECKSUM_UNNECESSARY;
3450
3451                        napi_gro_receive(&rx_q->napi, skb);
3452
3453                        priv->dev->stats.rx_packets++;
3454                        priv->dev->stats.rx_bytes += frame_len;
3455                }
3456                entry = next_entry;
3457        }
3458
3459        stmmac_rx_refill(priv, queue);
3460
3461        priv->xstats.rx_pkt_n += count;
3462
3463        return count;
3464}
3465
3466/**
3467 *  stmmac_poll - stmmac poll method (NAPI)
3468 *  @napi : pointer to the napi structure.
3469 *  @budget : maximum number of packets that the current CPU can receive from
3470 *            all interfaces.
3471 *  Description :
3472 *  To look at the incoming frames and clear the tx resources.
3473 */
3474static int stmmac_poll(struct napi_struct *napi, int budget)
3475{
3476        struct stmmac_rx_queue *rx_q =
3477                container_of(napi, struct stmmac_rx_queue, napi);
3478        struct stmmac_priv *priv = rx_q->priv_data;
3479        u32 tx_count = priv->plat->tx_queues_to_use;
3480        u32 chan = rx_q->queue_index;
3481        int work_done = 0;
3482        u32 queue;
3483
3484        priv->xstats.napi_poll++;
3485
3486        /* check all the queues */
3487        for (queue = 0; queue < tx_count; queue++)
3488                stmmac_tx_clean(priv, queue);
3489
3490        work_done = stmmac_rx(priv, budget, rx_q->queue_index);
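        /* If the budget was not exhausted, complete NAPI and re-enable the
         * DMA interrupts for this channel.
         */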
3491        if (work_done < budget) {
3492                napi_complete_done(napi, work_done);
3493                stmmac_enable_dma_irq(priv, chan);
3494        }
3495        return work_done;
3496}
3497
3498/**
3499 *  stmmac_tx_timeout
3500 *  @dev : Pointer to net device structure
3501 *  Description: this function is called when a packet transmission fails to
3502 *   complete within a reasonable time. The driver will mark the error in the
3503 *   netdev structure and arrange for the device to be reset to a sane state
3504 *   in order to transmit a new packet.
3505 */
3506static void stmmac_tx_timeout(struct net_device *dev)
3507{
3508        struct stmmac_priv *priv = netdev_priv(dev);
3509        u32 tx_count = priv->plat->tx_queues_to_use;
3510        u32 chan;
3511
3512        /* Clear Tx resources and restart transmitting again */
3513        for (chan = 0; chan < tx_count; chan++)
3514                stmmac_tx_err(priv, chan);
3515}
3516
3517/**
3518 *  stmmac_set_rx_mode - entry point for multicast addressing
3519 *  @dev : pointer to the device structure
3520 *  Description:
3521 *  This function is a driver entry point which gets called by the kernel
3522 *  whenever multicast addresses must be enabled/disabled.
3523 *  Return value:
3524 *  void.
3525 */
3526static void stmmac_set_rx_mode(struct net_device *dev)
3527{
3528        struct stmmac_priv *priv = netdev_priv(dev);
3529
3530        priv->hw->mac->set_filter(priv->hw, dev);
3531}
3532
3533/**
3534 *  stmmac_change_mtu - entry point to change MTU size for the device.
3535 *  @dev : device pointer.
3536 *  @new_mtu : the new MTU size for the device.
3537 *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3538 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3539 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3540 *  Return value:
3541 *  0 on success and an appropriate negative error code, as defined in
3542 *  the errno.h file, on failure.
3543 */
3544static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3545{
3546        struct stmmac_priv *priv = netdev_priv(dev);
3547
3548        if (netif_running(dev)) {
3549                netdev_err(priv->dev, "must be stopped to change its MTU\n");
3550                return -EBUSY;
3551        }
3552
3553        dev->mtu = new_mtu;
3554
3555        netdev_update_features(dev);
3556
3557        return 0;
3558}
3559
3560static netdev_features_t stmmac_fix_features(struct net_device *dev,
3561                                             netdev_features_t features)
3562{
3563        struct stmmac_priv *priv = netdev_priv(dev);
3564
3565        if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3566                features &= ~NETIF_F_RXCSUM;
3567
3568        if (!priv->plat->tx_coe)
3569                features &= ~NETIF_F_CSUM_MASK;
3570
3571        /* Some GMAC devices have buggy Jumbo frame support that
3572         * needs to have the Tx COE disabled for oversized frames
3573         * (due to limited buffer sizes). In this case we disable
3574         * the TX csum insertion in the TDES and do not use SF.
3575         */
3576        if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3577                features &= ~NETIF_F_CSUM_MASK;
3578
3579        /* Disable tso if asked by ethtool */
3580        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3581                if (features & NETIF_F_TSO)
3582                        priv->tso = true;
3583                else
3584                        priv->tso = false;
3585        }
3586
3587        return features;
3588}
3589
3590static int stmmac_set_features(struct net_device *netdev,
3591                               netdev_features_t features)
3592{
3593        struct stmmac_priv *priv = netdev_priv(netdev);
3594
3595        /* Keep the COE Type if checksum offload is supported */
3596        if (features & NETIF_F_RXCSUM)
3597                priv->hw->rx_csum = priv->plat->rx_coe;
3598        else
3599                priv->hw->rx_csum = 0;
3600        /* No check needed because rx_coe has been set before and it will be
3601         * fixed in case of issue.
3602         */
3603        priv->hw->mac->rx_ipc(priv->hw);
3604
3605        return 0;
3606}
3607
3608/**
3609 *  stmmac_interrupt - main ISR
3610 *  @irq: interrupt number.
3611 *  @dev_id: to pass the net device pointer.
3612 *  Description: this is the main driver interrupt service routine.
3613 *  It can call:
3614 *  o DMA service routine (to manage incoming frame reception and transmission
3615 *    status)
3616 *  o Core interrupts to manage: remote wake-up, management counter, LPI
3617 *    interrupts.
3618 */
3619static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3620{
3621        struct net_device *dev = (struct net_device *)dev_id;
3622        struct stmmac_priv *priv = netdev_priv(dev);
3623        u32 rx_cnt = priv->plat->rx_queues_to_use;
3624        u32 tx_cnt = priv->plat->tx_queues_to_use;
3625        u32 queues_count;
3626        u32 queue;
3627
3628        queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3629
3630        if (priv->irq_wake)
3631                pm_wakeup_event(priv->device, 0);
3632
3633        if (unlikely(!dev)) {
3634                netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3635                return IRQ_NONE;
3636        }
3637
3638        /* To handle GMAC own interrupts */
3639        if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3640                int status = priv->hw->mac->host_irq_status(priv->hw,
3641                                                            &priv->xstats);
3642
3643                if (unlikely(status)) {
3644                        /* For LPI we need to save the tx status */
3645                        if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3646                                priv->tx_path_in_lpi_mode = true;
3647                        if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3648                                priv->tx_path_in_lpi_mode = false;
3649                }
3650
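                /* On GMAC4 cores, also collect the per-queue MTL interrupt
                 * status; on an RX overflow, reload the RX tail pointer for
                 * that queue.
                 */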
3651                if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3652                        for (queue = 0; queue < queues_count; queue++) {
3653                                struct stmmac_rx_queue *rx_q =
3654                                &priv->rx_queue[queue];
3655
3656                                status |=
3657                                priv->hw->mac->host_mtl_irq_status(priv->hw,
3658                                                                   queue);
3659
3660                                if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3661                                    priv->hw->dma->set_rx_tail_ptr)
3662                                        priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3663                                                                rx_q->rx_tail_addr,
3664                                                                queue);
3665                        }
3666                }
3667
3668                /* PCS link status */
3669                if (priv->hw->pcs) {
3670                        if (priv->xstats.pcs_link)
3671                                netif_carrier_on(dev);
3672                        else
3673                                netif_carrier_off(dev);
3674                }
3675        }
3676
3677        /* To handle DMA interrupts */
3678        stmmac_dma_interrupt(priv);
3679
3680        return IRQ_HANDLED;
3681}
3682
3683#ifdef CONFIG_NET_POLL_CONTROLLER
3684/* Polling receive - used by NETCONSOLE and other diagnostic tools
3685 * to allow network I/O with interrupts disabled.
3686 */
3687static void stmmac_poll_controller(struct net_device *dev)
3688{
3689        disable_irq(dev->irq);
3690        stmmac_interrupt(dev->irq, dev);
3691        enable_irq(dev->irq);
3692}
3693#endif
3694
3695/**
3696 *  stmmac_ioctl - Entry point for the Ioctl
3697 *  @dev: Device pointer.
3698 *  @rq: An IOCTL-specific structure that can contain a pointer to
3699 *  a proprietary structure used to pass information to the driver.
3700 *  @cmd: IOCTL command
3701 *  Description:
3702 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3703 */
3704static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3705{
3706        int ret = -EOPNOTSUPP;
3707
3708        if (!netif_running(dev))
3709                return -EINVAL;
3710
3711        switch (cmd) {
3712        case SIOCGMIIPHY:
3713        case SIOCGMIIREG:
3714        case SIOCSMIIREG:
3715                if (!dev->phydev)
3716                        return -EINVAL;
3717                ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3718                break;
3719        case SIOCSHWTSTAMP:
3720                ret = stmmac_hwtstamp_ioctl(dev, rq);
3721                break;
3722        default:
3723                break;
3724        }
3725
3726        return ret;
3727}
3728
3729#ifdef CONFIG_DEBUG_FS
3730static struct dentry *stmmac_fs_dir;
3731
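    /* Dump one RX/TX descriptor ring (normal or extended descriptors) to the
     * given debugfs seq_file.
     */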
3732static void sysfs_display_ring(void *head, int size, int extend_desc,
3733                               struct seq_file *seq)
3734{
3735        int i;
3736        struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3737        struct dma_desc *p = (struct dma_desc *)head;
3738
3739        for (i = 0; i < size; i++) {
3740                if (extend_desc) {
3741                        seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3742                                   i, (unsigned int)virt_to_phys(ep),
3743                                   le32_to_cpu(ep->basic.des0),
3744                                   le32_to_cpu(ep->basic.des1),
3745                                   le32_to_cpu(ep->basic.des2),
3746                                   le32_to_cpu(ep->basic.des3));
3747                        ep++;
3748                } else {
3749                        seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3750                                   i, (unsigned int)virt_to_phys(p),
3751                                   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3752                                   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3753                        p++;
3754                }
3755                seq_printf(seq, "\n");
3756        }
3757}
3758
3759static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3760{
3761        struct net_device *dev = seq->private;
3762        struct stmmac_priv *priv = netdev_priv(dev);
3763        u32 rx_count = priv->plat->rx_queues_to_use;
3764        u32 tx_count = priv->plat->tx_queues_to_use;
3765        u32 queue;
3766
3767        for (queue = 0; queue < rx_count; queue++) {
3768                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3769
3770                seq_printf(seq, "RX Queue %d:\n", queue);
3771
3772                if (priv->extend_desc) {
3773                        seq_printf(seq, "Extended descriptor ring:\n");
3774                        sysfs_display_ring((void *)rx_q->dma_erx,
3775                                           DMA_RX_SIZE, 1, seq);
3776                } else {
3777                        seq_printf(seq, "Descriptor ring:\n");
3778                        sysfs_display_ring((void *)rx_q->dma_rx,
3779                                           DMA_RX_SIZE, 0, seq);
3780                }
3781        }
3782
3783        for (queue = 0; queue < tx_count; queue++) {
3784                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3785
3786                seq_printf(seq, "TX Queue %d:\n", queue);
3787
3788                if (priv->extend_desc) {
3789                        seq_printf(seq, "Extended descriptor ring:\n");
3790                        sysfs_display_ring((void *)tx_q->dma_etx,
3791                                           DMA_TX_SIZE, 1, seq);
3792                } else {
3793                        seq_printf(seq, "Descriptor ring:\n");
3794                        sysfs_display_ring((void *)tx_q->dma_tx,
3795                                           DMA_TX_SIZE, 0, seq);
3796                }
3797        }
3798
3799        return 0;
3800}
3801
3802static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3803{
3804        return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3805}
3806
3807/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3808
3809static const struct file_operations stmmac_rings_status_fops = {
3810        .owner = THIS_MODULE,
3811        .open = stmmac_sysfs_ring_open,
3812        .read = seq_read,
3813        .llseek = seq_lseek,
3814        .release = single_release,
3815};
3816
3817static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3818{
3819        struct net_device *dev = seq->private;
3820        struct stmmac_priv *priv = netdev_priv(dev);
3821
3822        if (!priv->hw_cap_support) {
3823                seq_printf(seq, "DMA HW features not supported\n");
3824                return 0;
3825        }
3826
3827        seq_printf(seq, "==============================\n");
3828        seq_printf(seq, "\tDMA HW features\n");
3829        seq_printf(seq, "==============================\n");
3830
3831        seq_printf(seq, "\t10/100 Mbps: %s\n",
3832                   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3833        seq_printf(seq, "\t1000 Mbps: %s\n",
3834                   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3835        seq_printf(seq, "\tHalf duplex: %s\n",
3836                   (priv->dma_cap.half_duplex) ? "Y" : "N");
3837        seq_printf(seq, "\tHash Filter: %s\n",
3838                   (priv->dma_cap.hash_filter) ? "Y" : "N");
3839        seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3840                   (priv->dma_cap.multi_addr) ? "Y" : "N");
3841        seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3842                   (priv->dma_cap.pcs) ? "Y" : "N");
3843        seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3844                   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3845        seq_printf(seq, "\tPMT Remote wake up: %s\n",
3846                   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3847        seq_printf(seq, "\tPMT Magic Frame: %s\n",
3848                   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3849        seq_printf(seq, "\tRMON module: %s\n",
3850                   (priv->dma_cap.rmon) ? "Y" : "N");
3851        seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3852                   (priv->dma_cap.time_stamp) ? "Y" : "N");
3853        seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3854                   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3855        seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3856                   (priv->dma_cap.eee) ? "Y" : "N");
3857        seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3858        seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3859                   (priv->dma_cap.tx_coe) ? "Y" : "N");
3860        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3861                seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3862                           (priv->dma_cap.rx_coe) ? "Y" : "N");
3863        } else {
3864                seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3865                           (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3866                seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3867                           (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3868        }
3869        seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3870                   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3871        seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3872                   priv->dma_cap.number_rx_channel);
3873        seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3874                   priv->dma_cap.number_tx_channel);
3875        seq_printf(seq, "\tEnhanced descriptors: %s\n",
3876                   (priv->dma_cap.enh_desc) ? "Y" : "N");
3877
3878        return 0;
3879}
3880
3881static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3882{
3883        return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3884}
3885
3886static const struct file_operations stmmac_dma_cap_fops = {
3887        .owner = THIS_MODULE,
3888        .open = stmmac_sysfs_dma_cap_open,
3889        .read = seq_read,
3890        .llseek = seq_lseek,
3891        .release = single_release,
3892};
3893
3894static int stmmac_init_fs(struct net_device *dev)
3895{
3896        struct stmmac_priv *priv = netdev_priv(dev);
3897
3898        /* Create per netdev entries */
3899        priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3900
3901        if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3902                netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3903
3904                return -ENOMEM;
3905        }
3906
3907        /* Entry to report DMA RX/TX rings */
3908        priv->dbgfs_rings_status =
3909                debugfs_create_file("descriptors_status", S_IRUGO,
3910                                    priv->dbgfs_dir, dev,
3911                                    &stmmac_rings_status_fops);
3912
3913        if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3914                netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3915                debugfs_remove_recursive(priv->dbgfs_dir);
3916
3917                return -ENOMEM;
3918        }
3919
3920        /* Entry to report the DMA HW features */
3921        priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3922                                            priv->dbgfs_dir,
3923                                            dev, &stmmac_dma_cap_fops);
3924
3925        if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3926                netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3927                debugfs_remove_recursive(priv->dbgfs_dir);
3928
3929                return -ENOMEM;
3930        }
3931
3932        return 0;
3933}
3934
3935static void stmmac_exit_fs(struct net_device *dev)
3936{
3937        struct stmmac_priv *priv = netdev_priv(dev);
3938
3939        debugfs_remove_recursive(priv->dbgfs_dir);
3940}
3941#endif /* CONFIG_DEBUG_FS */
3942
3943static const struct net_device_ops stmmac_netdev_ops = {
3944        .ndo_open = stmmac_open,
3945        .ndo_start_xmit = stmmac_xmit,
3946        .ndo_stop = stmmac_release,
3947        .ndo_change_mtu = stmmac_change_mtu,
3948        .ndo_fix_features = stmmac_fix_features,
3949        .ndo_set_features = stmmac_set_features,
3950        .ndo_set_rx_mode = stmmac_set_rx_mode,
3951        .ndo_tx_timeout = stmmac_tx_timeout,
3952        .ndo_do_ioctl = stmmac_ioctl,
3953#ifdef CONFIG_NET_POLL_CONTROLLER
3954        .ndo_poll_controller = stmmac_poll_controller,
3955#endif
3956        .ndo_set_mac_address = eth_mac_addr,
3957};
3958
3959/**
3960 *  stmmac_hw_init - Init the MAC device
3961 *  @priv: driver private structure
3962 *  Description: this function is to configure the MAC device according to
3963 *  some platform parameters or the HW capability register. It prepares the
3964 *  driver to use either ring or chain modes and to setup either enhanced or
3965 *  normal descriptors.
3966 */
3967static int stmmac_hw_init(struct stmmac_priv *priv)
3968{
3969        struct mac_device_info *mac;
3970
3971        /* Identify the MAC HW device */
3972        if (priv->plat->has_gmac) {
3973                priv->dev->priv_flags |= IFF_UNICAST_FLT;
3974                mac = dwmac1000_setup(priv->ioaddr,
3975                                      priv->plat->multicast_filter_bins,
3976                                      priv->plat->unicast_filter_entries,
3977                                      &priv->synopsys_id);
3978        } else if (priv->plat->has_gmac4) {
3979                priv->dev->priv_flags |= IFF_UNICAST_FLT;
3980                mac = dwmac4_setup(priv->ioaddr,
3981                                   priv->plat->multicast_filter_bins,
3982                                   priv->plat->unicast_filter_entries,
3983                                   &priv->synopsys_id);
3984        } else {
3985                mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3986        }
3987        if (!mac)
3988                return -ENOMEM;
3989
3990        priv->hw = mac;
3991
3992        /* To use the chained or ring mode */
3993        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3994                priv->hw->mode = &dwmac4_ring_mode_ops;
3995        } else {
3996                if (chain_mode) {
3997                        priv->hw->mode = &chain_mode_ops;
3998                        dev_info(priv->device, "Chain mode enabled\n");
3999                        priv->mode = STMMAC_CHAIN_MODE;
4000                } else {
4001                        priv->hw->mode = &ring_mode_ops;
4002                        dev_info(priv->device, "Ring mode enabled\n");
4003                        priv->mode = STMMAC_RING_MODE;
4004                }
4005        }
4006
4007        /* Get the HW capability (GMAC cores newer than 3.50a) */
4008        priv->hw_cap_support = stmmac_get_hw_features(priv);
4009        if (priv->hw_cap_support) {
4010                dev_info(priv->device, "DMA HW capability register supported\n");
4011
4012                /* We can override some gmac/dma configuration fields, e.g.
4013                 * enh_desc and tx_coe (that are passed through the
4014                 * platform), with the values from the HW capability
4015                 * register (if supported).
4016                 */
4017                priv->plat->enh_desc = priv->dma_cap.enh_desc;
4018                priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4019                priv->hw->pmt = priv->plat->pmt;
4020
4021                /* TXCOE doesn't work in thresh DMA mode */
4022                if (priv->plat->force_thresh_dma_mode)
4023                        priv->plat->tx_coe = 0;
4024                else
4025                        priv->plat->tx_coe = priv->dma_cap.tx_coe;
4026
4027                /* In case of GMAC4 rx_coe is from HW cap register. */
4028                priv->plat->rx_coe = priv->dma_cap.rx_coe;
4029
4030                if (priv->dma_cap.rx_coe_type2)
4031                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4032                else if (priv->dma_cap.rx_coe_type1)
4033                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4034
4035        } else {
4036                dev_info(priv->device, "No HW DMA feature register supported\n");
4037        }
4038
4039        /* To use alternate (extended), normal or GMAC4 descriptor structures */
4040        if (priv->synopsys_id >= DWMAC_CORE_4_00)
4041                priv->hw->desc = &dwmac4_desc_ops;
4042        else
4043                stmmac_selec_desc_mode(priv);
4044
4045        if (priv->plat->rx_coe) {
4046                priv->hw->rx_csum = priv->plat->rx_coe;
4047                dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4048                if (priv->synopsys_id < DWMAC_CORE_4_00)
4049                        dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4050        }
4051        if (priv->plat->tx_coe)
4052                dev_info(priv->device, "TX Checksum insertion supported\n");
4053
4054        if (priv->plat->pmt) {
4055                dev_info(priv->device, "Wake-Up On Lan supported\n");
4056                device_set_wakeup_capable(priv->device, 1);
4057        }
4058
4059        if (priv->dma_cap.tsoen)
4060                dev_info(priv->device, "TSO supported\n");
4061
4062        return 0;
4063}
4064
4065/**
4066 * stmmac_dvr_probe
4067 * @device: device pointer
4068 * @plat_dat: platform data pointer
4069 * @res: stmmac resource pointer
4070 * Description: this is the main probe function, used to
4071 * call alloc_etherdev and allocate the priv structure.
4072 * Return:
4073 * returns 0 on success, otherwise errno.
4074 */
4075int stmmac_dvr_probe(struct device *device,
4076                     struct plat_stmmacenet_data *plat_dat,
4077                     struct stmmac_resources *res)
4078{
4079        struct net_device *ndev = NULL;
4080        struct stmmac_priv *priv;
4081        int ret = 0;
4082        u32 queue;
4083
4084        ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4085                                  MTL_MAX_TX_QUEUES,
4086                                  MTL_MAX_RX_QUEUES);
4087        if (!ndev)
4088                return -ENOMEM;
4089
4090        SET_NETDEV_DEV(ndev, device);
4091
4092        priv = netdev_priv(ndev);
4093        priv->device = device;
4094        priv->dev = ndev;
4095
4096        stmmac_set_ethtool_ops(ndev);
4097        priv->pause = pause;
4098        priv->plat = plat_dat;
4099        priv->ioaddr = res->addr;
4100        priv->dev->base_addr = (unsigned long)res->addr;
4101
4102        priv->dev->irq = res->irq;
4103        priv->wol_irq = res->wol_irq;
4104        priv->lpi_irq = res->lpi_irq;
4105
4106        if (res->mac)
4107                memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4108
4109        dev_set_drvdata(device, priv->dev);
4110
4111        /* Verify driver arguments */
4112        stmmac_verify_args();
4113
4114        /* Override with kernel parameters if supplied XXX CRS XXX
4115         * this needs to have multiple instances
4116         */
4117        if ((phyaddr >= 0) && (phyaddr <= 31))
4118                priv->plat->phy_addr = phyaddr;
4119
4120        if (priv->plat->stmmac_rst)
4121                reset_control_deassert(priv->plat->stmmac_rst);
4122
4123        /* Init MAC and get the capabilities */
4124        ret = stmmac_hw_init(priv);
4125        if (ret)
4126                goto error_hw_init;
4127
4128        /* Configure real RX and TX queues */
4129        netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4130        netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4131
4132        ndev->netdev_ops = &stmmac_netdev_ops;
4133
4134        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4135                            NETIF_F_RXCSUM;
4136
4137        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4138                ndev->hw_features |= NETIF_F_TSO;
4139                priv->tso = true;
4140                dev_info(priv->device, "TSO feature enabled\n");
4141        }
4142        ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4143        ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4144#ifdef STMMAC_VLAN_TAG_USED
4145        /* Both mac100 and gmac support receive VLAN tag detection */
4146        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4147#endif
4148        priv->msg_enable = netif_msg_init(debug, default_msg_level);
4149
4150        /* MTU range: 46 - hw-specific max */
4151        ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4152        if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4153                ndev->max_mtu = JUMBO_LEN;
4154        else
4155                ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4156        /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4157         * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4158         */
4159        if ((priv->plat->maxmtu < ndev->max_mtu) &&
4160            (priv->plat->maxmtu >= ndev->min_mtu))
4161                ndev->max_mtu = priv->plat->maxmtu;
4162        else if (priv->plat->maxmtu < ndev->min_mtu)
4163                dev_warn(priv->device,
4164                         "%s: warning: maxmtu has an invalid value (%d)\n",
4165                         __func__, priv->plat->maxmtu);
4166
4167        if (flow_ctrl)
4168                priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
4169
4170        /* Rx Watchdog is available in cores newer than 3.40.
4171         * In some cases, for example on buggy HW, this feature
4172         * has to be disabled; this can be done by setting the
4173         * riwt_off field in the platform data.
4174         */
4175        if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4176                priv->use_riwt = 1;
4177                dev_info(priv->device,
4178                         "Enable RX Mitigation via HW Watchdog Timer\n");
4179        }
4180
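        /* Register one NAPI context per RX queue in use; note that the
         * poll weight passed to netif_napi_add() scales with the number
         * of RX queues rather than using the common default of 64.
         */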
4181        for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4182                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4183
4184                netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4185                               (8 * priv->plat->rx_queues_to_use));
4186        }
4187
4188        spin_lock_init(&priv->lock);
4189
4190        /* If a specific clk_csr value is passed from the platform,
4191         * this means that the CSR Clock Range selection cannot be
4192         * changed at run-time and is fixed. Otherwise, the driver will
4193         * try to set the MDC clock dynamically according to the actual
4194         * CSR clock input.
4195         */
4196        if (!priv->plat->clk_csr)
4197                stmmac_clk_csr_set(priv);
4198        else
4199                priv->clk_csr = priv->plat->clk_csr;
4200
4201        stmmac_check_pcs_mode(priv);
4202
4203        if (priv->hw->pcs != STMMAC_PCS_RGMII  &&
4204            priv->hw->pcs != STMMAC_PCS_TBI &&
4205            priv->hw->pcs != STMMAC_PCS_RTBI) {
4206                /* MDIO bus Registration */
4207                ret = stmmac_mdio_register(ndev);
4208                if (ret < 0) {
4209                        dev_err(priv->device,
4210                                "%s: MDIO bus (id: %d) registration failed\n",
4211                                __func__, priv->plat->bus_id);
4212                        goto error_mdio_register;
4213                }
4214        }
4215
4216        ret = register_netdev(ndev);
4217        if (ret) {
4218                dev_err(priv->device, "%s: ERROR %i registering the device\n",
4219                        __func__, ret);
4220                goto error_netdev_register;
4221        }
4222
4223        return ret;
4224
4225error_netdev_register:
4226        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4227            priv->hw->pcs != STMMAC_PCS_TBI &&
4228            priv->hw->pcs != STMMAC_PCS_RTBI)
4229                stmmac_mdio_unregister(ndev);
4230error_mdio_register:
4231        for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4232                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4233
4234                netif_napi_del(&rx_q->napi);
4235        }
4236error_hw_init:
4237        free_netdev(ndev);
4238
4239        return ret;
4240}
4241EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
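
/*
 * Usage sketch (hypothetical, not part of this file): a bus glue driver
 * is expected to fill a struct stmmac_resources from its own ioremapped
 * register base and IRQ lines and then hand it to this function, e.g.:
 *
 *	struct stmmac_resources stmmac_res = { 0 };
 *
 *	stmmac_res.addr = base;         (ioremapped register base)
 *	stmmac_res.irq = irq;           (main MAC interrupt)
 *	stmmac_res.wol_irq = wol_irq;   (Wake-on-LAN interrupt, optional)
 *	stmmac_res.lpi_irq = lpi_irq;   (EEE/LPI interrupt, optional)
 *	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *
 * "base", "irq", "wol_irq", "lpi_irq", "pdev" and "plat_dat" are
 * placeholders that the glue driver would provide.
 */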
4242
4243/**
4244 * stmmac_dvr_remove
4245 * @dev: device pointer
4246 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4247 * changes the link status and releases the DMA descriptor rings.
4248 */
4249int stmmac_dvr_remove(struct device *dev)
4250{
4251        struct net_device *ndev = dev_get_drvdata(dev);
4252        struct stmmac_priv *priv = netdev_priv(ndev);
4253
4254        netdev_info(priv->dev, "%s: removing driver\n", __func__);
4255
4256        stmmac_stop_all_dma(priv);
4257
4258        priv->hw->mac->set_mac(priv->ioaddr, false);
4259        netif_carrier_off(ndev);
4260        unregister_netdev(ndev);
4261        if (priv->plat->stmmac_rst)
4262                reset_control_assert(priv->plat->stmmac_rst);
4263        clk_disable_unprepare(priv->plat->pclk);
4264        clk_disable_unprepare(priv->plat->stmmac_clk);
4265        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4266            priv->hw->pcs != STMMAC_PCS_TBI &&
4267            priv->hw->pcs != STMMAC_PCS_RTBI)
4268                stmmac_mdio_unregister(ndev);
4269        free_netdev(ndev);
4270
4271        return 0;
4272}
4273EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
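
/*
 * A glue driver's .remove callback would typically forward to this
 * function and then undo its own bus-specific setup; a minimal sketch,
 * with "pdev" being the glue driver's platform device:
 *
 *	int ret = stmmac_dvr_remove(&pdev->dev);
 *	(bus/clock specific cleanup would follow here)
 *	return ret;
 */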
4274
4275/**
4276 * stmmac_suspend - suspend callback
4277 * @dev: device pointer
4278 * Description: this is the function to suspend the device and it is called
4279 * by the platform driver to stop the network queues, program the PMT
4280 * register (for WoL) and clean up and release the driver resources.
4281 */
4282int stmmac_suspend(struct device *dev)
4283{
4284        struct net_device *ndev = dev_get_drvdata(dev);
4285        struct stmmac_priv *priv = netdev_priv(ndev);
4286        unsigned long flags;
4287
4288        if (!ndev || !netif_running(ndev))
4289                return 0;
4290
4291        if (ndev->phydev)
4292                phy_stop(ndev->phydev);
4293
4294        spin_lock_irqsave(&priv->lock, flags);
4295
4296        netif_device_detach(ndev);
4297        stmmac_stop_all_queues(priv);
4298
4299        stmmac_disable_all_queues(priv);
4300
4301        /* Stop TX/RX DMA */
4302        stmmac_stop_all_dma(priv);
4303
4304        /* Enable Power down mode by programming the PMT regs */
4305        if (device_may_wakeup(priv->device)) {
4306                priv->hw->mac->pmt(priv->hw, priv->wolopts);
4307                priv->irq_wake = 1;
4308        } else {
4309                priv->hw->mac->set_mac(priv->ioaddr, false);
4310                pinctrl_pm_select_sleep_state(priv->device);
4311                /* Disable the clocks since PMT wake-up is off */
4312                clk_disable(priv->plat->pclk);
4313                clk_disable(priv->plat->stmmac_clk);
4314        }
4315        spin_unlock_irqrestore(&priv->lock, flags);
4316
4317        priv->oldlink = 0;
4318        priv->speed = SPEED_UNKNOWN;
4319        priv->oldduplex = DUPLEX_UNKNOWN;
4320        return 0;
4321}
4322EXPORT_SYMBOL_GPL(stmmac_suspend);
4323
4324/**
4325 * stmmac_reset_queues_param - reset queue parameters
4326 * @dev: device pointer
4327 */
4328static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4329{
4330        u32 rx_cnt = priv->plat->rx_queues_to_use;
4331        u32 tx_cnt = priv->plat->tx_queues_to_use;
4332        u32 queue;
4333
4334        for (queue = 0; queue < rx_cnt; queue++) {
4335                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4336
4337                rx_q->cur_rx = 0;
4338                rx_q->dirty_rx = 0;
4339        }
4340
4341        for (queue = 0; queue < tx_cnt; queue++) {
4342                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4343
4344                tx_q->cur_tx = 0;
4345                tx_q->dirty_tx = 0;
4346        }
4347}
4348
4349/**
4350 * stmmac_resume - resume callback
4351 * @dev: device pointer
4352 * Description: when resume this function is invoked to setup the DMA and CORE
4353 * in a usable state.
4354 */
4355int stmmac_resume(struct device *dev)
4356{
4357        struct net_device *ndev = dev_get_drvdata(dev);
4358        struct stmmac_priv *priv = netdev_priv(ndev);
4359        unsigned long flags;
4360
4361        if (!netif_running(ndev))
4362                return 0;
4363
4364        /* The Power Down bit in the PMT register is cleared
4365         * automatically as soon as a magic packet or a Wake-up frame
4366         * is received. Nevertheless, it's better to clear this bit
4367         * manually because it can cause problems when the resume is
4368         * triggered by another device (e.g. serial console).
4369         */
4370        if (device_may_wakeup(priv->device)) {
4371                spin_lock_irqsave(&priv->lock, flags);
4372                priv->hw->mac->pmt(priv->hw, 0);
4373                spin_unlock_irqrestore(&priv->lock, flags);
4374                priv->irq_wake = 0;
4375        } else {
4376                pinctrl_pm_select_default_state(priv->device);
4377                /* Re-enable the clocks previously disabled on suspend */
4378                clk_enable(priv->plat->stmmac_clk);
4379                clk_enable(priv->plat->pclk);
4380                /* reset the phy so that it's ready */
4381                if (priv->mii)
4382                        stmmac_mdio_reset(priv->mii);
4383        }
4384
4385        netif_device_attach(ndev);
4386
4387        spin_lock_irqsave(&priv->lock, flags);
4388
4389        stmmac_reset_queues_param(priv);
4390
4391        /* Reset the private MSS value to force the MSS context to be
4392         * reprogrammed on the next TSO xmit (only used for GMAC4).
4393         */
4394        priv->mss = 0;
4395
4396        stmmac_clear_descriptors(priv);
4397
4398        stmmac_hw_setup(ndev, false);
4399        stmmac_init_tx_coalesce(priv);
4400        stmmac_set_rx_mode(ndev);
4401
4402        stmmac_enable_all_queues(priv);
4403
4404        stmmac_start_all_queues(priv);
4405
4406        spin_unlock_irqrestore(&priv->lock, flags);
4407
4408        if (ndev->phydev)
4409                phy_start(ndev->phydev);
4410
4411        return 0;
4412}
4413EXPORT_SYMBOL_GPL(stmmac_resume);
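
/*
 * stmmac_suspend() and stmmac_resume() are meant to be wired into the
 * glue driver's dev_pm_ops. A minimal sketch (assuming the glue driver
 * calls them directly, with no extra clock handling of its own):
 *
 *	static SIMPLE_DEV_PM_OPS(stmmac_example_pm_ops,
 *				 stmmac_suspend, stmmac_resume);
 *
 * "stmmac_example_pm_ops" is a hypothetical name; real back-ends may
 * wrap these callbacks to manage bus clocks around them.
 */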
4414
4415#ifndef MODULE
4416static int __init stmmac_cmdline_opt(char *str)
4417{
4418        char *opt;
4419
4420        if (!str || !*str)
4421                return -EINVAL;
4422        while ((opt = strsep(&str, ",")) != NULL) {
4423                if (!strncmp(opt, "debug:", 6)) {
4424                        if (kstrtoint(opt + 6, 0, &debug))
4425                                goto err;
4426                } else if (!strncmp(opt, "phyaddr:", 8)) {
4427                        if (kstrtoint(opt + 8, 0, &phyaddr))
4428                                goto err;
4429                } else if (!strncmp(opt, "buf_sz:", 7)) {
4430                        if (kstrtoint(opt + 7, 0, &buf_sz))
4431                                goto err;
4432                } else if (!strncmp(opt, "tc:", 3)) {
4433                        if (kstrtoint(opt + 3, 0, &tc))
4434                                goto err;
4435                } else if (!strncmp(opt, "watchdog:", 9)) {
4436                        if (kstrtoint(opt + 9, 0, &watchdog))
4437                                goto err;
4438                } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4439                        if (kstrtoint(opt + 10, 0, &flow_ctrl))
4440                                goto err;
4441                } else if (!strncmp(opt, "pause:", 6)) {
4442                        if (kstrtoint(opt + 6, 0, &pause))
4443                                goto err;
4444                } else if (!strncmp(opt, "eee_timer:", 10)) {
4445                        if (kstrtoint(opt + 10, 0, &eee_timer))
4446                                goto err;
4447                } else if (!strncmp(opt, "chain_mode:", 11)) {
4448                        if (kstrtoint(opt + 11, 0, &chain_mode))
4449                                goto err;
4450                }
4451        }
4452        return 0;
4453
4454err:
4455        pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
4456        return -EINVAL;
4457}
4458
4459__setup("stmmaceth=", stmmac_cmdline_opt);
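
/*
 * Example (built-in driver only, values are illustrative): booting with
 *
 *	stmmaceth=debug:16,watchdog:4000,buf_sz:2048,flow_ctrl:1
 *
 * would raise the message level to maximum, shorten the TX timeout to
 * 4 seconds, enlarge the DMA buffers and turn flow control on, using
 * the same parser as above.
 */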
4460#endif /* MODULE */
4461
4462static int __init stmmac_init(void)
4463{
4464#ifdef CONFIG_DEBUG_FS
4465        /* Create debugfs main directory if it doesn't exist yet */
4466        if (!stmmac_fs_dir) {
4467                stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4468
4469                if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4470                        pr_err("ERROR %s, debugfs create directory failed\n",
4471                               STMMAC_RESOURCE_NAME);
4472
4473                        return -ENOMEM;
4474                }
4475        }
4476#endif
4477
4478        return 0;
4479}
4480
4481static void __exit stmmac_exit(void)
4482{
4483#ifdef CONFIG_DEBUG_FS
4484        debugfs_remove_recursive(stmmac_fs_dir);
4485#endif
4486}
4487
4488module_init(stmmac_init)
4489module_exit(stmmac_exit)
4490
4491MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4492MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4493MODULE_LICENSE("GPL");
4494