linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
   1/*******************************************************************************
   2  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
   3  ST Ethernet IPs are built around a Synopsys IP Core.
   4
   5        Copyright(C) 2007-2011 STMicroelectronics Ltd
   6
   7  This program is free software; you can redistribute it and/or modify it
   8  under the terms and conditions of the GNU General Public License,
   9  version 2, as published by the Free Software Foundation.
  10
  11  This program is distributed in the hope it will be useful, but WITHOUT
  12  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14  more details.
  15
  16  The full GNU General Public License is included in this distribution in
  17  the file called "COPYING".
  18
  19  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
  20
  21  Documentation available at:
  22        http://www.stlinux.com
  23  Support available at:
  24        https://bugzilla.stlinux.com/
  25*******************************************************************************/
  26
  27#include <linux/clk.h>
  28#include <linux/kernel.h>
  29#include <linux/interrupt.h>
  30#include <linux/ip.h>
  31#include <linux/tcp.h>
  32#include <linux/skbuff.h>
  33#include <linux/ethtool.h>
  34#include <linux/if_ether.h>
  35#include <linux/crc32.h>
  36#include <linux/mii.h>
  37#include <linux/if.h>
  38#include <linux/if_vlan.h>
  39#include <linux/dma-mapping.h>
  40#include <linux/slab.h>
  41#include <linux/prefetch.h>
  42#include <linux/pinctrl/consumer.h>
  43#ifdef CONFIG_DEBUG_FS
  44#include <linux/debugfs.h>
  45#include <linux/seq_file.h>
  46#endif /* CONFIG_DEBUG_FS */
  47#include <linux/net_tstamp.h>
  48#include <net/pkt_cls.h>
  49#include "stmmac_ptp.h"
  50#include "stmmac.h"
  51#include <linux/reset.h>
  52#include <linux/of_mdio.h>
  53#include "dwmac1000.h"
  54#include "dwxgmac2.h"
  55#include "hwif.h"
  56
  57#define STMMAC_ALIGN(x)         __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
  58#define TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
  59
  60/* Module parameters */
  61#define TX_TIMEO        5000
  62static int watchdog = TX_TIMEO;
  63module_param(watchdog, int, 0644);
  64MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
  65
  66static int debug = -1;
  67module_param(debug, int, 0644);
  68MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
  69
  70static int phyaddr = -1;
  71module_param(phyaddr, int, 0444);
  72MODULE_PARM_DESC(phyaddr, "Physical device address");
  73
  74#define STMMAC_TX_THRESH        (DMA_TX_SIZE / 4)
  75#define STMMAC_RX_THRESH        (DMA_RX_SIZE / 4)
  76
  77static int flow_ctrl = FLOW_OFF;
  78module_param(flow_ctrl, int, 0644);
  79MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
  80
  81static int pause = PAUSE_TIME;
  82module_param(pause, int, 0644);
  83MODULE_PARM_DESC(pause, "Flow Control Pause Time");
  84
  85#define TC_DEFAULT 64
  86static int tc = TC_DEFAULT;
  87module_param(tc, int, 0644);
  88MODULE_PARM_DESC(tc, "DMA threshold control value");
  89
  90#define DEFAULT_BUFSIZE 1536
  91static int buf_sz = DEFAULT_BUFSIZE;
  92module_param(buf_sz, int, 0644);
  93MODULE_PARM_DESC(buf_sz, "DMA buffer size");
  94
  95#define STMMAC_RX_COPYBREAK     256
  96
  97static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
  98                                      NETIF_MSG_LINK | NETIF_MSG_IFUP |
  99                                      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
 100
 101#define STMMAC_DEFAULT_LPI_TIMER        1000
 102static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 103module_param(eee_timer, int, 0644);
 104MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
 105#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
 106
 107/* By default the driver will use the ring mode to manage tx and rx descriptors,
  108 * but allow the user to force the use of chain mode instead of ring mode.
 109 */
 110static unsigned int chain_mode;
 111module_param(chain_mode, int, 0444);
 112MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
 113
 114static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 115
 116#ifdef CONFIG_DEBUG_FS
 117static int stmmac_init_fs(struct net_device *dev);
 118static void stmmac_exit_fs(struct net_device *dev);
 119#endif
 120
 121#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
 122
 123/**
 124 * stmmac_verify_args - verify the driver parameters.
  125 * Description: it checks the driver parameters and sets a default in case
  126 * of errors.
 127 */
 128static void stmmac_verify_args(void)
 129{
 130        if (unlikely(watchdog < 0))
 131                watchdog = TX_TIMEO;
 132        if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
 133                buf_sz = DEFAULT_BUFSIZE;
 134        if (unlikely(flow_ctrl > 1))
 135                flow_ctrl = FLOW_AUTO;
 136        else if (likely(flow_ctrl < 0))
 137                flow_ctrl = FLOW_OFF;
 138        if (unlikely((pause < 0) || (pause > 0xffff)))
 139                pause = PAUSE_TIME;
 140        if (eee_timer < 0)
 141                eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 142}
 143
 144/**
 145 * stmmac_disable_all_queues - Disable all queues
 146 * @priv: driver private structure
 147 */
 148static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 149{
 150        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
 151        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
 152        u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
 153        u32 queue;
 154
 155        for (queue = 0; queue < maxq; queue++) {
 156                struct stmmac_channel *ch = &priv->channel[queue];
 157
 158                napi_disable(&ch->napi);
 159        }
 160}
 161
 162/**
 163 * stmmac_enable_all_queues - Enable all queues
 164 * @priv: driver private structure
 165 */
 166static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 167{
 168        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
 169        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
 170        u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
 171        u32 queue;
 172
 173        for (queue = 0; queue < maxq; queue++) {
 174                struct stmmac_channel *ch = &priv->channel[queue];
 175
 176                napi_enable(&ch->napi);
 177        }
 178}
 179
 180/**
 181 * stmmac_stop_all_queues - Stop all queues
 182 * @priv: driver private structure
 183 */
 184static void stmmac_stop_all_queues(struct stmmac_priv *priv)
 185{
 186        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
 187        u32 queue;
 188
 189        for (queue = 0; queue < tx_queues_cnt; queue++)
 190                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
 191}
 192
 193/**
 194 * stmmac_start_all_queues - Start all queues
 195 * @priv: driver private structure
 196 */
 197static void stmmac_start_all_queues(struct stmmac_priv *priv)
 198{
 199        u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
 200        u32 queue;
 201
 202        for (queue = 0; queue < tx_queues_cnt; queue++)
 203                netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
 204}
 205
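     /* Schedule the service task unless the interface is going down or a
      * run is already pending.
      */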
 206static void stmmac_service_event_schedule(struct stmmac_priv *priv)
 207{
 208        if (!test_bit(STMMAC_DOWN, &priv->state) &&
 209            !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
 210                queue_work(priv->wq, &priv->service_task);
 211}
 212
 213static void stmmac_global_err(struct stmmac_priv *priv)
 214{
 215        netif_carrier_off(priv->dev);
 216        set_bit(STMMAC_RESET_REQUESTED, &priv->state);
 217        stmmac_service_event_schedule(priv);
 218}
 219
 220/**
 221 * stmmac_clk_csr_set - dynamically set the MDC clock
 222 * @priv: driver private structure
 223 * Description: this is to dynamically set the MDC clock according to the csr
 224 * clock input.
 225 * Note:
 226 *      If a specific clk_csr value is passed from the platform
 227 *      this means that the CSR Clock Range selection cannot be
 228 *      changed at run-time and it is fixed (as reported in the driver
  229 *      documentation). Otherwise, the driver will try to set the MDC
 230 *      clock dynamically according to the actual clock input.
 231 */
 232static void stmmac_clk_csr_set(struct stmmac_priv *priv)
 233{
 234        u32 clk_rate;
 235
 236        clk_rate = clk_get_rate(priv->plat->stmmac_clk);
 237
  238        /* The platform-provided default clk_csr is assumed valid in all
  239         * cases except the ones handled below.
  240         * For values higher than the IEEE 802.3 specified frequency range
  241         * we cannot estimate the proper divider because the frequency of
  242         * clk_csr_i is not known. So we do not change the default
 243         * divider.
 244         */
 245        if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
 246                if (clk_rate < CSR_F_35M)
 247                        priv->clk_csr = STMMAC_CSR_20_35M;
 248                else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
 249                        priv->clk_csr = STMMAC_CSR_35_60M;
 250                else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
 251                        priv->clk_csr = STMMAC_CSR_60_100M;
 252                else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
 253                        priv->clk_csr = STMMAC_CSR_100_150M;
 254                else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
 255                        priv->clk_csr = STMMAC_CSR_150_250M;
 256                else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
 257                        priv->clk_csr = STMMAC_CSR_250_300M;
 258        }
 259
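             /* Some glue layers (dwmac-sun8i, XGMAC) use their own CSR
              * encoding, so they override the generic value set above.
              */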
 260        if (priv->plat->has_sun8i) {
 261                if (clk_rate > 160000000)
 262                        priv->clk_csr = 0x03;
 263                else if (clk_rate > 80000000)
 264                        priv->clk_csr = 0x02;
 265                else if (clk_rate > 40000000)
 266                        priv->clk_csr = 0x01;
 267                else
 268                        priv->clk_csr = 0;
 269        }
 270
 271        if (priv->plat->has_xgmac) {
 272                if (clk_rate > 400000000)
 273                        priv->clk_csr = 0x5;
 274                else if (clk_rate > 350000000)
 275                        priv->clk_csr = 0x4;
 276                else if (clk_rate > 300000000)
 277                        priv->clk_csr = 0x3;
 278                else if (clk_rate > 250000000)
 279                        priv->clk_csr = 0x2;
 280                else if (clk_rate > 150000000)
 281                        priv->clk_csr = 0x1;
 282                else
 283                        priv->clk_csr = 0x0;
 284        }
 285}
 286
 287static void print_pkt(unsigned char *buf, int len)
 288{
 289        pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
 290        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
 291}
 292
 293static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
 294{
 295        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 296        u32 avail;
 297
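             /* One descriptor slot is always kept unused so that a completely
              * full ring can be told apart from an empty one
              * (cur_tx == dirty_tx).
              */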
 298        if (tx_q->dirty_tx > tx_q->cur_tx)
 299                avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
 300        else
 301                avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
 302
 303        return avail;
 304}
 305
 306/**
 307 * stmmac_rx_dirty - Get RX queue dirty
 308 * @priv: driver private structure
 309 * @queue: RX queue index
 310 */
 311static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
 312{
 313        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 314        u32 dirty;
 315
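             /* Number of descriptors already used by the DMA and waiting to
              * be refilled, taking ring wrap-around into account.
              */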
 316        if (rx_q->dirty_rx <= rx_q->cur_rx)
 317                dirty = rx_q->cur_rx - rx_q->dirty_rx;
 318        else
 319                dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
 320
 321        return dirty;
 322}
 323
 324/**
 325 * stmmac_hw_fix_mac_speed - callback for speed selection
 326 * @priv: driver private structure
 327 * Description: on some platforms (e.g. ST), some HW system configuration
 328 * registers have to be set according to the link speed negotiated.
 329 */
 330static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
 331{
 332        struct net_device *ndev = priv->dev;
 333        struct phy_device *phydev = ndev->phydev;
 334
 335        if (likely(priv->plat->fix_mac_speed))
 336                priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
 337}
 338
 339/**
  340 * stmmac_enable_eee_mode - check and enter LPI mode
  341 * @priv: driver private structure
  342 * Description: verify that all TX queues have finished their work and,
  343 * if so, enter LPI mode when EEE is enabled.
 344 */
 345static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
 346{
 347        u32 tx_cnt = priv->plat->tx_queues_to_use;
 348        u32 queue;
 349
 350        /* check if all TX queues have the work finished */
 351        for (queue = 0; queue < tx_cnt; queue++) {
 352                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 353
 354                if (tx_q->dirty_tx != tx_q->cur_tx)
 355                        return; /* still unfinished work */
 356        }
 357
 358        /* Check and enter in LPI mode */
 359        if (!priv->tx_path_in_lpi_mode)
 360                stmmac_set_eee_mode(priv, priv->hw,
 361                                priv->plat->en_tx_lpi_clockgating);
 362}
 363
 364/**
 365 * stmmac_disable_eee_mode - disable and exit from LPI mode
 366 * @priv: driver private structure
  367 * Description: exit LPI mode and disable EEE when the LPI state is
  368 * active. This is called from the xmit path.
 369 */
 370void stmmac_disable_eee_mode(struct stmmac_priv *priv)
 371{
 372        stmmac_reset_eee_mode(priv, priv->hw);
 373        del_timer_sync(&priv->eee_ctrl_timer);
 374        priv->tx_path_in_lpi_mode = false;
 375}
 376
 377/**
 378 * stmmac_eee_ctrl_timer - EEE TX SW timer.
  379 * @t: pointer to the timer_list embedded in the driver private structure
 380 * Description:
 381 *  if there is no data transfer and if we are not in LPI state,
  382 *  then the MAC transmitter can be moved to the LPI state.
 383 */
 384static void stmmac_eee_ctrl_timer(struct timer_list *t)
 385{
 386        struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
 387
 388        stmmac_enable_eee_mode(priv);
 389        mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 390}
 391
 392/**
 393 * stmmac_eee_init - init EEE
 394 * @priv: driver private structure
 395 * Description:
 396 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
  397 *  can also manage EEE, this function enables the LPI state and starts
  398 *  the related timer.
 399 */
 400bool stmmac_eee_init(struct stmmac_priv *priv)
 401{
 402        struct net_device *ndev = priv->dev;
 403        int interface = priv->plat->interface;
 404        bool ret = false;
 405
 406        if ((interface != PHY_INTERFACE_MODE_MII) &&
 407            (interface != PHY_INTERFACE_MODE_GMII) &&
 408            !phy_interface_mode_is_rgmii(interface))
 409                goto out;
 410
  411        /* When using PCS we cannot access the phy registers at this stage,
  412         * so we do not support extra features like EEE.
 413         */
 414        if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
 415            (priv->hw->pcs == STMMAC_PCS_TBI) ||
 416            (priv->hw->pcs == STMMAC_PCS_RTBI))
 417                goto out;
 418
 419        /* MAC core supports the EEE feature. */
 420        if (priv->dma_cap.eee) {
 421                int tx_lpi_timer = priv->tx_lpi_timer;
 422
 423                /* Check if the PHY supports EEE */
 424                if (phy_init_eee(ndev->phydev, 1)) {
  425                        /* Handle the case where EEE can no longer be supported
  426                         * at run-time (for example because the link partner
  427                         * capabilities have changed).
  428                         * In that case the driver disables its own timer.
 429                         */
 430                        mutex_lock(&priv->lock);
 431                        if (priv->eee_active) {
 432                                netdev_dbg(priv->dev, "disable EEE\n");
 433                                del_timer_sync(&priv->eee_ctrl_timer);
 434                                stmmac_set_eee_timer(priv, priv->hw, 0,
 435                                                tx_lpi_timer);
 436                        }
 437                        priv->eee_active = 0;
 438                        mutex_unlock(&priv->lock);
 439                        goto out;
 440                }
 441                /* Activate the EEE and start timers */
 442                mutex_lock(&priv->lock);
 443                if (!priv->eee_active) {
 444                        priv->eee_active = 1;
 445                        timer_setup(&priv->eee_ctrl_timer,
 446                                    stmmac_eee_ctrl_timer, 0);
 447                        mod_timer(&priv->eee_ctrl_timer,
 448                                  STMMAC_LPI_T(eee_timer));
 449
 450                        stmmac_set_eee_timer(priv, priv->hw,
 451                                        STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
 452                }
 453                /* Set HW EEE according to the speed */
 454                stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
 455
 456                ret = true;
 457                mutex_unlock(&priv->lock);
 458
 459                netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
 460        }
 461out:
 462        return ret;
 463}
 464
 465/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 466 * @priv: driver private structure
 467 * @p : descriptor pointer
 468 * @skb : the socket buffer
 469 * Description :
  470 * This function reads the timestamp from the descriptor, performs some
  471 * sanity checks and passes it to the stack.
 472 */
 473static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
 474                                   struct dma_desc *p, struct sk_buff *skb)
 475{
 476        struct skb_shared_hwtstamps shhwtstamp;
 477        u64 ns;
 478
 479        if (!priv->hwts_tx_en)
 480                return;
 481
 482        /* exit if skb doesn't support hw tstamp */
 483        if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
 484                return;
 485
 486        /* check tx tstamp status */
 487        if (stmmac_get_tx_timestamp_status(priv, p)) {
 488                /* get the valid tstamp */
 489                stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
 490
 491                memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 492                shhwtstamp.hwtstamp = ns_to_ktime(ns);
 493
 494                netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
 495                /* pass tstamp to stack */
 496                skb_tstamp_tx(skb, &shhwtstamp);
 497        }
 498
 499        return;
 500}
 501
 502/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 503 * @priv: driver private structure
 504 * @p : descriptor pointer
 505 * @np : next descriptor pointer
 506 * @skb : the socket buffer
 507 * Description :
  508 * This function reads the received packet's timestamp from the descriptor
  509 * and passes it to the stack. It also performs some sanity checks.
 510 */
 511static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
 512                                   struct dma_desc *np, struct sk_buff *skb)
 513{
 514        struct skb_shared_hwtstamps *shhwtstamp = NULL;
 515        struct dma_desc *desc = p;
 516        u64 ns;
 517
 518        if (!priv->hwts_rx_en)
 519                return;
 520        /* For GMAC4, the valid timestamp is from CTX next desc. */
 521        if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
 522                desc = np;
 523
 524        /* Check if timestamp is available */
 525        if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
 526                stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
 527                netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
 528                shhwtstamp = skb_hwtstamps(skb);
 529                memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
 530                shhwtstamp->hwtstamp = ns_to_ktime(ns);
 531        } else  {
 532                netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
 533        }
 534}
 535
 536/**
 537 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 538 *  @dev: device pointer.
 539 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 540 *  a proprietary structure used to pass information to the driver.
 541 *  Description:
  542 *  This function configures the MAC to enable/disable both outgoing (TX)
  543 *  and incoming (RX) packet timestamping based on user input.
  544 *  Return Value:
  545 *  0 on success and a negative errno on failure.
 546 */
 547static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 548{
 549        struct stmmac_priv *priv = netdev_priv(dev);
 550        struct hwtstamp_config config;
 551        struct timespec64 now;
 552        u64 temp = 0;
 553        u32 ptp_v2 = 0;
 554        u32 tstamp_all = 0;
 555        u32 ptp_over_ipv4_udp = 0;
 556        u32 ptp_over_ipv6_udp = 0;
 557        u32 ptp_over_ethernet = 0;
 558        u32 snap_type_sel = 0;
 559        u32 ts_master_en = 0;
 560        u32 ts_event_en = 0;
 561        u32 value = 0;
 562        u32 sec_inc;
 563        bool xmac;
 564
 565        xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
 566
 567        if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
 568                netdev_alert(priv->dev, "No support for HW time stamping\n");
 569                priv->hwts_tx_en = 0;
 570                priv->hwts_rx_en = 0;
 571
 572                return -EOPNOTSUPP;
 573        }
 574
 575        if (copy_from_user(&config, ifr->ifr_data,
 576                           sizeof(struct hwtstamp_config)))
 577                return -EFAULT;
 578
 579        netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
 580                   __func__, config.flags, config.tx_type, config.rx_filter);
 581
 582        /* reserved for future extensions */
 583        if (config.flags)
 584                return -EINVAL;
 585
 586        if (config.tx_type != HWTSTAMP_TX_OFF &&
 587            config.tx_type != HWTSTAMP_TX_ON)
 588                return -ERANGE;
 589
 590        if (priv->adv_ts) {
 591                switch (config.rx_filter) {
 592                case HWTSTAMP_FILTER_NONE:
 593                        /* time stamp no incoming packet at all */
 594                        config.rx_filter = HWTSTAMP_FILTER_NONE;
 595                        break;
 596
 597                case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 598                        /* PTP v1, UDP, any kind of event packet */
 599                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 600                        /* take time stamp for all event messages */
 601                        if (xmac)
 602                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
 603                        else
 604                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 605
 606                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 607                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 608                        break;
 609
 610                case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 611                        /* PTP v1, UDP, Sync packet */
 612                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
 613                        /* take time stamp for SYNC messages only */
 614                        ts_event_en = PTP_TCR_TSEVNTENA;
 615
 616                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 617                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 618                        break;
 619
 620                case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 621                        /* PTP v1, UDP, Delay_req packet */
 622                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
 623                        /* take time stamp for Delay_Req messages only */
 624                        ts_master_en = PTP_TCR_TSMSTRENA;
 625                        ts_event_en = PTP_TCR_TSEVNTENA;
 626
 627                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 628                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 629                        break;
 630
 631                case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 632                        /* PTP v2, UDP, any kind of event packet */
 633                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 634                        ptp_v2 = PTP_TCR_TSVER2ENA;
 635                        /* take time stamp for all event messages */
 636                        if (xmac)
 637                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
 638                        else
 639                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 640
 641                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 642                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 643                        break;
 644
 645                case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 646                        /* PTP v2, UDP, Sync packet */
 647                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
 648                        ptp_v2 = PTP_TCR_TSVER2ENA;
 649                        /* take time stamp for SYNC messages only */
 650                        ts_event_en = PTP_TCR_TSEVNTENA;
 651
 652                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 653                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 654                        break;
 655
 656                case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 657                        /* PTP v2, UDP, Delay_req packet */
 658                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
 659                        ptp_v2 = PTP_TCR_TSVER2ENA;
 660                        /* take time stamp for Delay_Req messages only */
 661                        ts_master_en = PTP_TCR_TSMSTRENA;
 662                        ts_event_en = PTP_TCR_TSEVNTENA;
 663
 664                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 665                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 666                        break;
 667
 668                case HWTSTAMP_FILTER_PTP_V2_EVENT:
 669                        /* PTP v2/802.AS1 any layer, any kind of event packet */
 670                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 671                        ptp_v2 = PTP_TCR_TSVER2ENA;
 672                        /* take time stamp for all event messages */
 673                        if (xmac)
 674                                snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
 675                        else
 676                                snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
 677
 678                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 679                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 680                        ptp_over_ethernet = PTP_TCR_TSIPENA;
 681                        break;
 682
 683                case HWTSTAMP_FILTER_PTP_V2_SYNC:
 684                        /* PTP v2/802.AS1, any layer, Sync packet */
 685                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
 686                        ptp_v2 = PTP_TCR_TSVER2ENA;
 687                        /* take time stamp for SYNC messages only */
 688                        ts_event_en = PTP_TCR_TSEVNTENA;
 689
 690                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 691                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 692                        ptp_over_ethernet = PTP_TCR_TSIPENA;
 693                        break;
 694
 695                case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 696                        /* PTP v2/802.AS1, any layer, Delay_req packet */
 697                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
 698                        ptp_v2 = PTP_TCR_TSVER2ENA;
 699                        /* take time stamp for Delay_Req messages only */
 700                        ts_master_en = PTP_TCR_TSMSTRENA;
 701                        ts_event_en = PTP_TCR_TSEVNTENA;
 702
 703                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
 704                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
 705                        ptp_over_ethernet = PTP_TCR_TSIPENA;
 706                        break;
 707
 708                case HWTSTAMP_FILTER_NTP_ALL:
 709                case HWTSTAMP_FILTER_ALL:
 710                        /* time stamp any incoming packet */
 711                        config.rx_filter = HWTSTAMP_FILTER_ALL;
 712                        tstamp_all = PTP_TCR_TSENALL;
 713                        break;
 714
 715                default:
 716                        return -ERANGE;
 717                }
 718        } else {
 719                switch (config.rx_filter) {
 720                case HWTSTAMP_FILTER_NONE:
 721                        config.rx_filter = HWTSTAMP_FILTER_NONE;
 722                        break;
 723                default:
 724                        /* PTP v1, UDP, any kind of event packet */
 725                        config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
 726                        break;
 727                }
 728        }
 729        priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
 730        priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
 731
 732        if (!priv->hwts_tx_en && !priv->hwts_rx_en)
 733                stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
 734        else {
 735                value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
 736                         tstamp_all | ptp_v2 | ptp_over_ethernet |
 737                         ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
 738                         ts_master_en | snap_type_sel);
 739                stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
 740
 741                /* program Sub Second Increment reg */
 742                stmmac_config_sub_second_increment(priv,
 743                                priv->ptpaddr, priv->plat->clk_ptp_rate,
 744                                xmac, &sec_inc);
 745                temp = div_u64(1000000000ULL, sec_inc);
 746
 747                /* Store sub second increment and flags for later use */
 748                priv->sub_second_inc = sec_inc;
 749                priv->systime_flags = value;
 750
  751                /* Calculate the default addend value:
  752                 * the formula is
  753                 * addend = (2^32) / freq_div_ratio;
  754                 * where freq_div_ratio = 1e9 ns / sec_inc
 755                 */
 756                temp = (u64)(temp << 32);
 757                priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
 758                stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
 759
 760                /* initialize system time */
 761                ktime_get_real_ts64(&now);
 762
 763                /* lower 32 bits of tv_sec are safe until y2106 */
 764                stmmac_init_systime(priv, priv->ptpaddr,
 765                                (u32)now.tv_sec, now.tv_nsec);
 766        }
 767
 768        return copy_to_user(ifr->ifr_data, &config,
 769                            sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
 770}
 771
 772/**
 773 * stmmac_init_ptp - init PTP
 774 * @priv: driver private structure
 775 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 776 * This is done by looking at the HW cap. register.
 777 * This function also registers the ptp driver.
 778 */
 779static int stmmac_init_ptp(struct stmmac_priv *priv)
 780{
 781        bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
 782
 783        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
 784                return -EOPNOTSUPP;
 785
 786        priv->adv_ts = 0;
 787        /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
 788        if (xmac && priv->dma_cap.atime_stamp)
 789                priv->adv_ts = 1;
 790        /* Dwmac 3.x core with extend_desc can support adv_ts */
 791        else if (priv->extend_desc && priv->dma_cap.atime_stamp)
 792                priv->adv_ts = 1;
 793
 794        if (priv->dma_cap.time_stamp)
 795                netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
 796
 797        if (priv->adv_ts)
 798                netdev_info(priv->dev,
 799                            "IEEE 1588-2008 Advanced Timestamp supported\n");
 800
 801        priv->hwts_tx_en = 0;
 802        priv->hwts_rx_en = 0;
 803
 804        stmmac_ptp_register(priv);
 805
 806        return 0;
 807}
 808
 809static void stmmac_release_ptp(struct stmmac_priv *priv)
 810{
 811        if (priv->plat->clk_ptp_ref)
 812                clk_disable_unprepare(priv->plat->clk_ptp_ref);
 813        stmmac_ptp_unregister(priv);
 814}
 815
 816/**
 817 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 818 *  @priv: driver private structure
 819 *  Description: It is used for configuring the flow control in all queues
 820 */
 821static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
 822{
 823        u32 tx_cnt = priv->plat->tx_queues_to_use;
 824
 825        stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
 826                        priv->pause, tx_cnt);
 827}
 828
 829/**
 830 * stmmac_adjust_link - adjusts the link parameters
 831 * @dev: net device structure
 832 * Description: this is the helper called by the physical abstraction layer
  833 * drivers to communicate the phy link status. According to the speed and
  834 * duplex this driver can invoke the registered glue-logic as well.
  835 * It also invokes the EEE initialization because the link can be switched
  836 * to a different network that is EEE capable.
 837 */
 838static void stmmac_adjust_link(struct net_device *dev)
 839{
 840        struct stmmac_priv *priv = netdev_priv(dev);
 841        struct phy_device *phydev = dev->phydev;
 842        bool new_state = false;
 843
 844        if (!phydev)
 845                return;
 846
 847        mutex_lock(&priv->lock);
 848
 849        if (phydev->link) {
 850                u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
 851
 852                /* Now we make sure that we can be in full duplex mode.
 853                 * If not, we operate in half-duplex mode. */
 854                if (phydev->duplex != priv->oldduplex) {
 855                        new_state = true;
 856                        if (!phydev->duplex)
 857                                ctrl &= ~priv->hw->link.duplex;
 858                        else
 859                                ctrl |= priv->hw->link.duplex;
 860                        priv->oldduplex = phydev->duplex;
 861                }
 862                /* Flow Control operation */
 863                if (phydev->pause)
 864                        stmmac_mac_flow_ctrl(priv, phydev->duplex);
 865
 866                if (phydev->speed != priv->speed) {
 867                        new_state = true;
 868                        ctrl &= ~priv->hw->link.speed_mask;
 869                        switch (phydev->speed) {
 870                        case SPEED_1000:
 871                                ctrl |= priv->hw->link.speed1000;
 872                                break;
 873                        case SPEED_100:
 874                                ctrl |= priv->hw->link.speed100;
 875                                break;
 876                        case SPEED_10:
 877                                ctrl |= priv->hw->link.speed10;
 878                                break;
 879                        default:
 880                                netif_warn(priv, link, priv->dev,
 881                                           "broken speed: %d\n", phydev->speed);
 882                                phydev->speed = SPEED_UNKNOWN;
 883                                break;
 884                        }
 885                        if (phydev->speed != SPEED_UNKNOWN)
 886                                stmmac_hw_fix_mac_speed(priv);
 887                        priv->speed = phydev->speed;
 888                }
 889
 890                writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
 891
 892                if (!priv->oldlink) {
 893                        new_state = true;
 894                        priv->oldlink = true;
 895                }
 896        } else if (priv->oldlink) {
 897                new_state = true;
 898                priv->oldlink = false;
 899                priv->speed = SPEED_UNKNOWN;
 900                priv->oldduplex = DUPLEX_UNKNOWN;
 901        }
 902
 903        if (new_state && netif_msg_link(priv))
 904                phy_print_status(phydev);
 905
 906        mutex_unlock(&priv->lock);
 907
 908        if (phydev->is_pseudo_fixed_link)
  909                /* Stop the PHY layer from calling the adjust_link hook in
  910                 * case a switch is attached to the stmmac driver.
 911                 */
 912                phydev->irq = PHY_IGNORE_INTERRUPT;
 913        else
 914                /* At this stage, init the EEE if supported.
 915                 * Never called in case of fixed_link.
 916                 */
 917                priv->eee_enabled = stmmac_eee_init(priv);
 918}
 919
 920/**
 921 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 922 * @priv: driver private structure
 923 * Description: this is to verify if the HW supports the PCS.
 924 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 925 * configured for the TBI, RTBI, or SGMII PHY interface.
 926 */
 927static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 928{
 929        int interface = priv->plat->interface;
 930
 931        if (priv->dma_cap.pcs) {
 932                if ((interface == PHY_INTERFACE_MODE_RGMII) ||
 933                    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
 934                    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
 935                    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
 936                        netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
 937                        priv->hw->pcs = STMMAC_PCS_RGMII;
 938                } else if (interface == PHY_INTERFACE_MODE_SGMII) {
 939                        netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
 940                        priv->hw->pcs = STMMAC_PCS_SGMII;
 941                }
 942        }
 943}
 944
 945/**
 946 * stmmac_init_phy - PHY initialization
 947 * @dev: net device structure
 948 * Description: it initializes the driver's PHY state, and attaches the PHY
 949 * to the mac driver.
 950 *  Return value:
 951 *  0 on success
 952 */
 953static int stmmac_init_phy(struct net_device *dev)
 954{
 955        struct stmmac_priv *priv = netdev_priv(dev);
 956        u32 tx_cnt = priv->plat->tx_queues_to_use;
 957        struct phy_device *phydev;
 958        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
 959        char bus_id[MII_BUS_ID_SIZE];
 960        int interface = priv->plat->interface;
 961        int max_speed = priv->plat->max_speed;
 962        priv->oldlink = false;
 963        priv->speed = SPEED_UNKNOWN;
 964        priv->oldduplex = DUPLEX_UNKNOWN;
 965
 966        if (priv->plat->phy_node) {
 967                phydev = of_phy_connect(dev, priv->plat->phy_node,
 968                                        &stmmac_adjust_link, 0, interface);
 969        } else {
 970                snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
 971                         priv->plat->bus_id);
 972
 973                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 974                         priv->plat->phy_addr);
 975                netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
 976                           phy_id_fmt);
 977
 978                phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
 979                                     interface);
 980        }
 981
 982        if (IS_ERR_OR_NULL(phydev)) {
 983                netdev_err(priv->dev, "Could not attach to PHY\n");
 984                if (!phydev)
 985                        return -ENODEV;
 986
 987                return PTR_ERR(phydev);
 988        }
 989
 990        /* Stop Advertising 1000BASE Capability if interface is not GMII */
 991        if ((interface == PHY_INTERFACE_MODE_MII) ||
 992            (interface == PHY_INTERFACE_MODE_RMII) ||
 993                (max_speed < 1000 && max_speed > 0))
 994                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
 995                                         SUPPORTED_1000baseT_Full);
 996
 997        /*
  998         * Half-duplex mode is not supported with multiqueue:
  999         * half-duplex can only work with a single queue.
1000         */
1001        if (tx_cnt > 1)
1002                phydev->supported &= ~(SUPPORTED_1000baseT_Half |
1003                                       SUPPORTED_100baseT_Half |
1004                                       SUPPORTED_10baseT_Half);
1005
1006        /*
1007         * Broken HW is sometimes missing the pull-up resistor on the
1008         * MDIO line, which results in reads to non-existent devices returning
1009         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
1010         * device as well.
1011         * Note: phydev->phy_id is the result of reading the UID PHY registers.
1012         */
1013        if (!priv->plat->phy_node && phydev->phy_id == 0) {
1014                phy_disconnect(phydev);
1015                return -ENODEV;
1016        }
1017
1018        /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
 1019         * subsequent PHY polling; make sure we force a link transition if
 1020         * we have an UP/DOWN/UP transition.
1021         */
1022        if (phydev->is_pseudo_fixed_link)
1023                phydev->irq = PHY_POLL;
1024
1025        phy_attached_info(phydev);
1026        return 0;
1027}
1028
1029static void stmmac_display_rx_rings(struct stmmac_priv *priv)
1030{
1031        u32 rx_cnt = priv->plat->rx_queues_to_use;
1032        void *head_rx;
1033        u32 queue;
1034
1035        /* Display RX rings */
1036        for (queue = 0; queue < rx_cnt; queue++) {
1037                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1038
1039                pr_info("\tRX Queue %u rings\n", queue);
1040
1041                if (priv->extend_desc)
1042                        head_rx = (void *)rx_q->dma_erx;
1043                else
1044                        head_rx = (void *)rx_q->dma_rx;
1045
1046                /* Display RX ring */
1047                stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
1048        }
1049}
1050
1051static void stmmac_display_tx_rings(struct stmmac_priv *priv)
1052{
1053        u32 tx_cnt = priv->plat->tx_queues_to_use;
1054        void *head_tx;
1055        u32 queue;
1056
1057        /* Display TX rings */
1058        for (queue = 0; queue < tx_cnt; queue++) {
1059                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1060
1061                pr_info("\tTX Queue %d rings\n", queue);
1062
1063                if (priv->extend_desc)
1064                        head_tx = (void *)tx_q->dma_etx;
1065                else
1066                        head_tx = (void *)tx_q->dma_tx;
1067
1068                stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
1069        }
1070}
1071
1072static void stmmac_display_rings(struct stmmac_priv *priv)
1073{
1074        /* Display RX ring */
1075        stmmac_display_rx_rings(priv);
1076
1077        /* Display TX ring */
1078        stmmac_display_tx_rings(priv);
1079}
1080
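     /* Pick the DMA buffer size bucket that can hold a frame of the given
      * MTU; frames that fit in DEFAULT_BUFSIZE keep the default size.
      */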
1081static int stmmac_set_bfsize(int mtu, int bufsize)
1082{
1083        int ret = bufsize;
1084
1085        if (mtu >= BUF_SIZE_4KiB)
1086                ret = BUF_SIZE_8KiB;
1087        else if (mtu >= BUF_SIZE_2KiB)
1088                ret = BUF_SIZE_4KiB;
1089        else if (mtu > DEFAULT_BUFSIZE)
1090                ret = BUF_SIZE_2KiB;
1091        else
1092                ret = DEFAULT_BUFSIZE;
1093
1094        return ret;
1095}
1096
1097/**
1098 * stmmac_clear_rx_descriptors - clear RX descriptors
1099 * @priv: driver private structure
1100 * @queue: RX queue index
1101 * Description: this function is called to clear the RX descriptors
1102 * in case of both basic and extended descriptors are used.
1103 */
1104static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1105{
1106        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1107        int i;
1108
1109        /* Clear the RX descriptors */
1110        for (i = 0; i < DMA_RX_SIZE; i++)
1111                if (priv->extend_desc)
1112                        stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1113                                        priv->use_riwt, priv->mode,
1114                                        (i == DMA_RX_SIZE - 1));
1115                else
1116                        stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1117                                        priv->use_riwt, priv->mode,
1118                                        (i == DMA_RX_SIZE - 1));
1119}
1120
1121/**
1122 * stmmac_clear_tx_descriptors - clear tx descriptors
1123 * @priv: driver private structure
1124 * @queue: TX queue index.
 1125 * Description: this function is called to clear the TX descriptors,
 1126 * whether basic or extended descriptors are used.
1127 */
1128static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1129{
1130        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1131        int i;
1132
1133        /* Clear the TX descriptors */
1134        for (i = 0; i < DMA_TX_SIZE; i++)
1135                if (priv->extend_desc)
1136                        stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1137                                        priv->mode, (i == DMA_TX_SIZE - 1));
1138                else
1139                        stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1140                                        priv->mode, (i == DMA_TX_SIZE - 1));
1141}
1142
1143/**
1144 * stmmac_clear_descriptors - clear descriptors
1145 * @priv: driver private structure
 1146 * Description: this function is called to clear the TX and RX descriptors,
 1147 * whether basic or extended descriptors are used.
1148 */
1149static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1150{
1151        u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1152        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1153        u32 queue;
1154
1155        /* Clear the RX descriptors */
1156        for (queue = 0; queue < rx_queue_cnt; queue++)
1157                stmmac_clear_rx_descriptors(priv, queue);
1158
1159        /* Clear the TX descriptors */
1160        for (queue = 0; queue < tx_queue_cnt; queue++)
1161                stmmac_clear_tx_descriptors(priv, queue);
1162}
1163
1164/**
1165 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1166 * @priv: driver private structure
1167 * @p: descriptor pointer
1168 * @i: descriptor index
1169 * @flags: gfp flag
1170 * @queue: RX queue index
1171 * Description: this function is called to allocate a receive buffer, perform
1172 * the DMA mapping and init the descriptor.
1173 */
1174static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1175                                  int i, gfp_t flags, u32 queue)
1176{
1177        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1178        struct sk_buff *skb;
1179
1180        skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1181        if (!skb) {
1182                netdev_err(priv->dev,
1183                           "%s: Rx init fails; skb is NULL\n", __func__);
1184                return -ENOMEM;
1185        }
1186        rx_q->rx_skbuff[i] = skb;
1187        rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1188                                                priv->dma_buf_sz,
1189                                                DMA_FROM_DEVICE);
1190        if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1191                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1192                dev_kfree_skb_any(skb);
1193                return -EINVAL;
1194        }
1195
1196        stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
1197
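             /* In ring mode with 16KiB buffers the second buffer pointer
              * (desc3) is also programmed, so a single frame can span both
              * halves of the buffer.
              */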
1198        if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1199                stmmac_init_desc3(priv, p);
1200
1201        return 0;
1202}
1203
1204/**
1205 * stmmac_free_rx_buffer - free RX dma buffers
1206 * @priv: private structure
1207 * @queue: RX queue index
1208 * @i: buffer index.
1209 */
1210static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1211{
1212        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1213
1214        if (rx_q->rx_skbuff[i]) {
1215                dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1216                                 priv->dma_buf_sz, DMA_FROM_DEVICE);
1217                dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1218        }
1219        rx_q->rx_skbuff[i] = NULL;
1220}
1221
1222/**
 1223 * stmmac_free_tx_buffer - free TX dma buffers
 1224 * @priv: private structure
 1225 * @queue: TX queue index
1226 * @i: buffer index.
1227 */
1228static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1229{
1230        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1231
1232        if (tx_q->tx_skbuff_dma[i].buf) {
1233                if (tx_q->tx_skbuff_dma[i].map_as_page)
1234                        dma_unmap_page(priv->device,
1235                                       tx_q->tx_skbuff_dma[i].buf,
1236                                       tx_q->tx_skbuff_dma[i].len,
1237                                       DMA_TO_DEVICE);
1238                else
1239                        dma_unmap_single(priv->device,
1240                                         tx_q->tx_skbuff_dma[i].buf,
1241                                         tx_q->tx_skbuff_dma[i].len,
1242                                         DMA_TO_DEVICE);
1243        }
1244
1245        if (tx_q->tx_skbuff[i]) {
1246                dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1247                tx_q->tx_skbuff[i] = NULL;
1248                tx_q->tx_skbuff_dma[i].buf = 0;
1249                tx_q->tx_skbuff_dma[i].map_as_page = false;
1250        }
1251}
1252
1253/**
1254 * init_dma_rx_desc_rings - init the RX descriptor rings
1255 * @dev: net device structure
1256 * @flags: gfp flag.
1257 * Description: this function initializes the DMA RX descriptors
1258 * and allocates the socket buffers. It supports the chained and ring
1259 * modes.
1260 */
1261static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1262{
1263        struct stmmac_priv *priv = netdev_priv(dev);
1264        u32 rx_count = priv->plat->rx_queues_to_use;
1265        int ret = -ENOMEM;
1266        int bfsize = 0;
1267        int queue;
1268        int i;
1269
1270        bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1271        if (bfsize < 0)
1272                bfsize = 0;
1273
1274        if (bfsize < BUF_SIZE_16KiB)
1275                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1276
1277        priv->dma_buf_sz = bfsize;
1278
1279        /* RX INITIALIZATION */
1280        netif_dbg(priv, probe, priv->dev,
1281                  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1282
1283        for (queue = 0; queue < rx_count; queue++) {
1284                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1285
1286                netif_dbg(priv, probe, priv->dev,
1287                          "(%s) dma_rx_phy=0x%08x\n", __func__,
1288                          (u32)rx_q->dma_rx_phy);
1289
1290                for (i = 0; i < DMA_RX_SIZE; i++) {
1291                        struct dma_desc *p;
1292
1293                        if (priv->extend_desc)
1294                                p = &((rx_q->dma_erx + i)->basic);
1295                        else
1296                                p = rx_q->dma_rx + i;
1297
1298                        ret = stmmac_init_rx_buffers(priv, p, i, flags,
1299                                                     queue);
1300                        if (ret)
1301                                goto err_init_rx_buffers;
1302
1303                        netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1304                                  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1305                                  (unsigned int)rx_q->rx_skbuff_dma[i]);
1306                }
1307
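                     /* The loop above filled the whole ring (i == DMA_RX_SIZE),
                      * so cur_rx and dirty_rx both start at 0 and no refill is
                      * pending.
                      */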
1308                rx_q->cur_rx = 0;
1309                rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1310
1311                stmmac_clear_rx_descriptors(priv, queue);
1312
1313                /* Setup the chained descriptor addresses */
1314                if (priv->mode == STMMAC_CHAIN_MODE) {
1315                        if (priv->extend_desc)
1316                                stmmac_mode_init(priv, rx_q->dma_erx,
1317                                                rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
1318                        else
1319                                stmmac_mode_init(priv, rx_q->dma_rx,
1320                                                rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
1321                }
1322        }
1323
1324        buf_sz = bfsize;
1325
1326        return 0;
1327
1328err_init_rx_buffers:
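             /* Unwind: free the buffers of the partially initialized queue,
              * then all buffers of the queues initialized before it.
              */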
1329        while (queue >= 0) {
1330                while (--i >= 0)
1331                        stmmac_free_rx_buffer(priv, queue, i);
1332
1333                if (queue == 0)
1334                        break;
1335
1336                i = DMA_RX_SIZE;
1337                queue--;
1338        }
1339
1340        return ret;
1341}
1342
1343/**
1344 * init_dma_tx_desc_rings - init the TX descriptor rings
1345 * @dev: net device structure.
1346 * Description: this function initializes the DMA TX descriptors
1347 * and allocates the socket buffers. It supports the chained and ring
1348 * modes.
1349 */
1350static int init_dma_tx_desc_rings(struct net_device *dev)
1351{
1352        struct stmmac_priv *priv = netdev_priv(dev);
1353        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1354        u32 queue;
1355        int i;
1356
1357        for (queue = 0; queue < tx_queue_cnt; queue++) {
1358                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1359
1360                netif_dbg(priv, probe, priv->dev,
1361                          "(%s) dma_tx_phy=0x%08x\n", __func__,
1362                         (u32)tx_q->dma_tx_phy);
1363
1364                /* Setup the chained descriptor addresses */
1365                if (priv->mode == STMMAC_CHAIN_MODE) {
1366                        if (priv->extend_desc)
1367                                stmmac_mode_init(priv, tx_q->dma_etx,
1368                                                tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
1369                        else
1370                                stmmac_mode_init(priv, tx_q->dma_tx,
1371                                                tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
1372                }
1373
1374                for (i = 0; i < DMA_TX_SIZE; i++) {
1375                        struct dma_desc *p;
1376                        if (priv->extend_desc)
1377                                p = &((tx_q->dma_etx + i)->basic);
1378                        else
1379                                p = tx_q->dma_tx + i;
1380
1381                        stmmac_clear_desc(priv, p);
1382
1383                        tx_q->tx_skbuff_dma[i].buf = 0;
1384                        tx_q->tx_skbuff_dma[i].map_as_page = false;
1385                        tx_q->tx_skbuff_dma[i].len = 0;
1386                        tx_q->tx_skbuff_dma[i].last_segment = false;
1387                        tx_q->tx_skbuff[i] = NULL;
1388                }
1389
1390                tx_q->dirty_tx = 0;
1391                tx_q->cur_tx = 0;
1392                tx_q->mss = 0;
1393
1394                netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1395        }
1396
1397        return 0;
1398}
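/*
 * Illustrative sketch (not part of the upstream file): the "extended vs.
 * basic descriptor" selection used above is repeated in several places
 * (see also stmmac_tx_clean() and stmmac_tx_err() below).  A hypothetical
 * helper capturing the pattern could look like this:
 *
 *	static struct dma_desc *tx_desc_at(struct stmmac_priv *priv,
 *					   struct stmmac_tx_queue *tx_q,
 *					   unsigned int entry)
 *	{
 *		if (priv->extend_desc)
 *			return &tx_q->dma_etx[entry].basic;
 *		return tx_q->dma_tx + entry;
 *	}
 *
 * The driver open-codes the check instead, which keeps the hot paths free
 * of an extra call but duplicates the branch.
 */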
1399
1400/**
1401 * init_dma_desc_rings - init the RX/TX descriptor rings
1402 * @dev: net device structure
1403 * @flags: gfp flag.
1404 * Description: this function initializes the DMA RX/TX descriptors
1405 * and allocates the socket buffers. It supports the chained and ring
1406 * modes.
1407 */
1408static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1409{
1410        struct stmmac_priv *priv = netdev_priv(dev);
1411        int ret;
1412
1413        ret = init_dma_rx_desc_rings(dev, flags);
1414        if (ret)
1415                return ret;
1416
1417        ret = init_dma_tx_desc_rings(dev);
1418
1419        stmmac_clear_descriptors(priv);
1420
1421        if (netif_msg_hw(priv))
1422                stmmac_display_rings(priv);
1423
1424        return ret;
1425}
1426
1427/**
1428 * dma_free_rx_skbufs - free RX dma buffers
1429 * @priv: private structure
1430 * @queue: RX queue index
1431 */
1432static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1433{
1434        int i;
1435
1436        for (i = 0; i < DMA_RX_SIZE; i++)
1437                stmmac_free_rx_buffer(priv, queue, i);
1438}
1439
1440/**
1441 * dma_free_tx_skbufs - free TX dma buffers
1442 * @priv: private structure
1443 * @queue: TX queue index
1444 */
1445static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1446{
1447        int i;
1448
1449        for (i = 0; i < DMA_TX_SIZE; i++)
1450                stmmac_free_tx_buffer(priv, queue, i);
1451}
1452
1453/**
1454 * free_dma_rx_desc_resources - free RX dma desc resources
1455 * @priv: private structure
1456 */
1457static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1458{
1459        u32 rx_count = priv->plat->rx_queues_to_use;
1460        u32 queue;
1461
1462        /* Free RX queue resources */
1463        for (queue = 0; queue < rx_count; queue++) {
1464                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1465
1466                /* Release the DMA RX socket buffers */
1467                dma_free_rx_skbufs(priv, queue);
1468
1469                /* Free DMA regions of consistent memory previously allocated */
1470                if (!priv->extend_desc)
1471                        dma_free_coherent(priv->device,
1472                                          DMA_RX_SIZE * sizeof(struct dma_desc),
1473                                          rx_q->dma_rx, rx_q->dma_rx_phy);
1474                else
1475                        dma_free_coherent(priv->device, DMA_RX_SIZE *
1476                                          sizeof(struct dma_extended_desc),
1477                                          rx_q->dma_erx, rx_q->dma_rx_phy);
1478
1479                kfree(rx_q->rx_skbuff_dma);
1480                kfree(rx_q->rx_skbuff);
1481        }
1482}
1483
1484/**
1485 * free_dma_tx_desc_resources - free TX dma desc resources
1486 * @priv: private structure
1487 */
1488static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1489{
1490        u32 tx_count = priv->plat->tx_queues_to_use;
1491        u32 queue;
1492
1493        /* Free TX queue resources */
1494        for (queue = 0; queue < tx_count; queue++) {
1495                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1496
1497                /* Release the DMA TX socket buffers */
1498                dma_free_tx_skbufs(priv, queue);
1499
1500                /* Free DMA regions of consistent memory previously allocated */
1501                if (!priv->extend_desc)
1502                        dma_free_coherent(priv->device,
1503                                          DMA_TX_SIZE * sizeof(struct dma_desc),
1504                                          tx_q->dma_tx, tx_q->dma_tx_phy);
1505                else
1506                        dma_free_coherent(priv->device, DMA_TX_SIZE *
1507                                          sizeof(struct dma_extended_desc),
1508                                          tx_q->dma_etx, tx_q->dma_tx_phy);
1509
1510                kfree(tx_q->tx_skbuff_dma);
1511                kfree(tx_q->tx_skbuff);
1512        }
1513}
1514
1515/**
1516 * alloc_dma_rx_desc_resources - alloc RX resources.
1517 * @priv: private structure
1518 * Description: according to which descriptor can be used (extended or
1519 * basic), this function allocates the resources for the RX path. In case
1520 * of reception, for example, the RX socket buffers are pre-allocated in
1521 * order to allow the zero-copy mechanism.
1522 */
1523static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1524{
1525        u32 rx_count = priv->plat->rx_queues_to_use;
1526        int ret = -ENOMEM;
1527        u32 queue;
1528
1529        /* RX queues buffers and DMA */
1530        for (queue = 0; queue < rx_count; queue++) {
1531                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1532
1533                rx_q->queue_index = queue;
1534                rx_q->priv_data = priv;
1535
1536                rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1537                                                    sizeof(dma_addr_t),
1538                                                    GFP_KERNEL);
1539                if (!rx_q->rx_skbuff_dma)
1540                        goto err_dma;
1541
1542                rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1543                                                sizeof(struct sk_buff *),
1544                                                GFP_KERNEL);
1545                if (!rx_q->rx_skbuff)
1546                        goto err_dma;
1547
1548                if (priv->extend_desc) {
1549                        rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1550                                                            DMA_RX_SIZE *
1551                                                            sizeof(struct
1552                                                            dma_extended_desc),
1553                                                            &rx_q->dma_rx_phy,
1554                                                            GFP_KERNEL);
1555                        if (!rx_q->dma_erx)
1556                                goto err_dma;
1557
1558                } else {
1559                        rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1560                                                           DMA_RX_SIZE *
1561                                                           sizeof(struct
1562                                                           dma_desc),
1563                                                           &rx_q->dma_rx_phy,
1564                                                           GFP_KERNEL);
1565                        if (!rx_q->dma_rx)
1566                                goto err_dma;
1567                }
1568        }
1569
1570        return 0;
1571
1572err_dma:
1573        free_dma_rx_desc_resources(priv);
1574
1575        return ret;
1576}
1577
1578/**
1579 * alloc_dma_tx_desc_resources - alloc TX resources.
1580 * @priv: private structure
1581 * Description: according to which descriptor can be used (extended or
1582 * basic), this function allocates the resources for the TX path: the
1583 * per-queue tx_skbuff/tx_skbuff_dma arrays and the coherent memory that
1584 * holds the TX descriptor rings.
1585 */
1586static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1587{
1588        u32 tx_count = priv->plat->tx_queues_to_use;
1589        int ret = -ENOMEM;
1590        u32 queue;
1591
1592        /* TX queues buffers and DMA */
1593        for (queue = 0; queue < tx_count; queue++) {
1594                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1595
1596                tx_q->queue_index = queue;
1597                tx_q->priv_data = priv;
1598
1599                tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1600                                                    sizeof(*tx_q->tx_skbuff_dma),
1601                                                    GFP_KERNEL);
1602                if (!tx_q->tx_skbuff_dma)
1603                        goto err_dma;
1604
1605                tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1606                                                sizeof(struct sk_buff *),
1607                                                GFP_KERNEL);
1608                if (!tx_q->tx_skbuff)
1609                        goto err_dma;
1610
1611                if (priv->extend_desc) {
1612                        tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1613                                                            DMA_TX_SIZE *
1614                                                            sizeof(struct
1615                                                            dma_extended_desc),
1616                                                            &tx_q->dma_tx_phy,
1617                                                            GFP_KERNEL);
1618                        if (!tx_q->dma_etx)
1619                                goto err_dma;
1620                } else {
1621                        tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1622                                                           DMA_TX_SIZE *
1623                                                           sizeof(struct
1624                                                                  dma_desc),
1625                                                           &tx_q->dma_tx_phy,
1626                                                           GFP_KERNEL);
1627                        if (!tx_q->dma_tx)
1628                                goto err_dma;
1629                }
1630        }
1631
1632        return 0;
1633
1634err_dma:
1635        free_dma_tx_desc_resources(priv);
1636
1637        return ret;
1638}
1639
1640/**
1641 * alloc_dma_desc_resources - alloc TX/RX resources.
1642 * @priv: private structure
1643 * Description: according to which descriptor can be used (extended or
1644 * basic), this function allocates the resources for the TX and RX paths.
1645 * In case of reception, for example, the RX socket buffers are
1646 * pre-allocated in order to allow the zero-copy mechanism.
1647 */
1648static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1649{
1650        /* RX Allocation */
1651        int ret = alloc_dma_rx_desc_resources(priv);
1652
1653        if (ret)
1654                return ret;
1655
1656        ret = alloc_dma_tx_desc_resources(priv);
1657
1658        return ret;
1659}
1660
1661/**
1662 * free_dma_desc_resources - free dma desc resources
1663 * @priv: private structure
1664 */
1665static void free_dma_desc_resources(struct stmmac_priv *priv)
1666{
1667        /* Release the DMA RX socket buffers */
1668        free_dma_rx_desc_resources(priv);
1669
1670        /* Release the DMA TX socket buffers */
1671        free_dma_tx_desc_resources(priv);
1672}
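/*
 * Illustrative usage sketch (not part of the upstream file): the helpers
 * above are meant to be composed as allocate -> initialize -> (run) ->
 * free, exactly as stmmac_open()/stmmac_release() do further down in this
 * file:
 *
 *	ret = alloc_dma_desc_resources(priv);	// arrays + coherent rings
 *	if (ret)
 *		return ret;
 *
 *	ret = init_dma_desc_rings(dev, GFP_KERNEL); // clear descs, RX skbs
 *	if (ret)
 *		goto err_free;
 *	...
 * err_free:
 *	free_dma_desc_resources(priv);		// skbs, arrays, coherent rings
 *
 * err_free is a hypothetical label; the real open path uses the
 * init_error/dma_desc_error labels shown later.
 */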
1673
1674/**
1675 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1676 *  @priv: driver private structure
1677 *  Description: It is used for enabling the rx queues in the MAC
1678 */
1679static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1680{
1681        u32 rx_queues_count = priv->plat->rx_queues_to_use;
1682        int queue;
1683        u8 mode;
1684
1685        for (queue = 0; queue < rx_queues_count; queue++) {
1686                mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1687                stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1688        }
1689}
1690
1691/**
1692 * stmmac_start_rx_dma - start RX DMA channel
1693 * @priv: driver private structure
1694 * @chan: RX channel index
1695 * Description:
1696 * This starts an RX DMA channel
1697 */
1698static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1699{
1700        netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1701        stmmac_start_rx(priv, priv->ioaddr, chan);
1702}
1703
1704/**
1705 * stmmac_start_tx_dma - start TX DMA channel
1706 * @priv: driver private structure
1707 * @chan: TX channel index
1708 * Description:
1709 * This starts a TX DMA channel
1710 */
1711static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1712{
1713        netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1714        stmmac_start_tx(priv, priv->ioaddr, chan);
1715}
1716
1717/**
1718 * stmmac_stop_rx_dma - stop RX DMA channel
1719 * @priv: driver private structure
1720 * @chan: RX channel index
1721 * Description:
1722 * This stops an RX DMA channel
1723 */
1724static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1725{
1726        netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1727        stmmac_stop_rx(priv, priv->ioaddr, chan);
1728}
1729
1730/**
1731 * stmmac_stop_tx_dma - stop TX DMA channel
1732 * @priv: driver private structure
1733 * @chan: TX channel index
1734 * Description:
1735 * This stops a TX DMA channel
1736 */
1737static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1738{
1739        netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1740        stmmac_stop_tx(priv, priv->ioaddr, chan);
1741}
1742
1743/**
1744 * stmmac_start_all_dma - start all RX and TX DMA channels
1745 * @priv: driver private structure
1746 * Description:
1747 * This starts all the RX and TX DMA channels
1748 */
1749static void stmmac_start_all_dma(struct stmmac_priv *priv)
1750{
1751        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1752        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1753        u32 chan = 0;
1754
1755        for (chan = 0; chan < rx_channels_count; chan++)
1756                stmmac_start_rx_dma(priv, chan);
1757
1758        for (chan = 0; chan < tx_channels_count; chan++)
1759                stmmac_start_tx_dma(priv, chan);
1760}
1761
1762/**
1763 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1764 * @priv: driver private structure
1765 * Description:
1766 * This stops the RX and TX DMA channels
1767 */
1768static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1769{
1770        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1771        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1772        u32 chan = 0;
1773
1774        for (chan = 0; chan < rx_channels_count; chan++)
1775                stmmac_stop_rx_dma(priv, chan);
1776
1777        for (chan = 0; chan < tx_channels_count; chan++)
1778                stmmac_stop_tx_dma(priv, chan);
1779}
1780
1781/**
1782 *  stmmac_dma_operation_mode - HW DMA operation mode
1783 *  @priv: driver private structure
1784 *  Description: it is used for configuring the DMA operation mode register in
1785 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1786 */
1787static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1788{
1789        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1790        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1791        int rxfifosz = priv->plat->rx_fifo_size;
1792        int txfifosz = priv->plat->tx_fifo_size;
1793        u32 txmode = 0;
1794        u32 rxmode = 0;
1795        u32 chan = 0;
1796        u8 qmode = 0;
1797
1798        if (rxfifosz == 0)
1799                rxfifosz = priv->dma_cap.rx_fifo_size;
1800        if (txfifosz == 0)
1801                txfifosz = priv->dma_cap.tx_fifo_size;
1802
1803        /* Adjust for real per queue fifo size */
1804        rxfifosz /= rx_channels_count;
1805        txfifosz /= tx_channels_count;
1806
1807        if (priv->plat->force_thresh_dma_mode) {
1808                txmode = tc;
1809                rxmode = tc;
1810        } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1811                /*
1812                 * In case of GMAC, SF mode can be enabled
1813                 * to perform the TX COE in HW. This depends on:
1814                 * 1) TX COE being actually supported
1815                 * 2) there being no buggy Jumbo frame support
1816                 *    that requires not inserting the csum in the TDES.
1817                 */
1818                txmode = SF_DMA_MODE;
1819                rxmode = SF_DMA_MODE;
1820                priv->xstats.threshold = SF_DMA_MODE;
1821        } else {
1822                txmode = tc;
1823                rxmode = SF_DMA_MODE;
1824        }
1825
1826        /* configure all channels */
1827        for (chan = 0; chan < rx_channels_count; chan++) {
1828                qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1829
1830                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1831                                rxfifosz, qmode);
1832                stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1833                                chan);
1834        }
1835
1836        for (chan = 0; chan < tx_channels_count; chan++) {
1837                qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1838
1839                stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1840                                txfifosz, qmode);
1841        }
1842}
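/*
 * Illustrative sketch (not part of the upstream file): the FIFO sizing
 * logic above (and again in stmmac_set_dma_operation_mode()) boils down
 * to "use the platform value if provided, otherwise fall back to the
 * value reported in dma_cap, then split it evenly across the channels".
 * A hypothetical helper expressing that:
 *
 *	static int per_chan_fifo_size(int plat_size, int cap_size, u32 nchan)
 *	{
 *		int fifosz = plat_size ? plat_size : cap_size;
 *
 *		return fifosz / nchan;
 *	}
 *
 * e.g. per_chan_fifo_size(priv->plat->rx_fifo_size,
 *			   priv->dma_cap.rx_fifo_size, rx_channels_count);
 */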
1843
1844/**
1845 * stmmac_tx_clean - to manage the transmission completion
1846 * @priv: driver private structure
1847 * @queue: TX queue index
1848 * Description: it reclaims the transmit resources after transmission completes.
1849 */
1850static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
1851{
1852        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1853        unsigned int bytes_compl = 0, pkts_compl = 0;
1854        unsigned int entry, count = 0;
1855
1856        __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
1857
1858        priv->xstats.tx_clean++;
1859
1860        entry = tx_q->dirty_tx;
1861        while ((entry != tx_q->cur_tx) && (count < budget)) {
1862                struct sk_buff *skb = tx_q->tx_skbuff[entry];
1863                struct dma_desc *p;
1864                int status;
1865
1866                if (priv->extend_desc)
1867                        p = (struct dma_desc *)(tx_q->dma_etx + entry);
1868                else
1869                        p = tx_q->dma_tx + entry;
1870
1871                status = stmmac_tx_status(priv, &priv->dev->stats,
1872                                &priv->xstats, p, priv->ioaddr);
1873                /* Check if the descriptor is owned by the DMA */
1874                if (unlikely(status & tx_dma_own))
1875                        break;
1876
1877                count++;
1878
1879                /* Make sure descriptor fields are read after reading
1880                 * the own bit.
1881                 */
1882                dma_rmb();
1883
1884                /* Just consider the last segment and ...*/
1885                if (likely(!(status & tx_not_ls))) {
1886                        /* ... verify the status error condition */
1887                        if (unlikely(status & tx_err)) {
1888                                priv->dev->stats.tx_errors++;
1889                        } else {
1890                                priv->dev->stats.tx_packets++;
1891                                priv->xstats.tx_pkt_n++;
1892                        }
1893                        stmmac_get_tx_hwtstamp(priv, p, skb);
1894                }
1895
1896                if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1897                        if (tx_q->tx_skbuff_dma[entry].map_as_page)
1898                                dma_unmap_page(priv->device,
1899                                               tx_q->tx_skbuff_dma[entry].buf,
1900                                               tx_q->tx_skbuff_dma[entry].len,
1901                                               DMA_TO_DEVICE);
1902                        else
1903                                dma_unmap_single(priv->device,
1904                                                 tx_q->tx_skbuff_dma[entry].buf,
1905                                                 tx_q->tx_skbuff_dma[entry].len,
1906                                                 DMA_TO_DEVICE);
1907                        tx_q->tx_skbuff_dma[entry].buf = 0;
1908                        tx_q->tx_skbuff_dma[entry].len = 0;
1909                        tx_q->tx_skbuff_dma[entry].map_as_page = false;
1910                }
1911
1912                stmmac_clean_desc3(priv, tx_q, p);
1913
1914                tx_q->tx_skbuff_dma[entry].last_segment = false;
1915                tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1916
1917                if (likely(skb != NULL)) {
1918                        pkts_compl++;
1919                        bytes_compl += skb->len;
1920                        dev_consume_skb_any(skb);
1921                        tx_q->tx_skbuff[entry] = NULL;
1922                }
1923
1924                stmmac_release_tx_desc(priv, p, priv->mode);
1925
1926                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1927        }
1928        tx_q->dirty_tx = entry;
1929
1930        netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1931                                  pkts_compl, bytes_compl);
1932
1933        if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1934                                                                queue))) &&
1935            stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1936
1937                netif_dbg(priv, tx_done, priv->dev,
1938                          "%s: restart transmit\n", __func__);
1939                netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1940        }
1941
1942        if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1943                stmmac_enable_eee_mode(priv);
1944                mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1945        }
1946
1947        __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1948
1949        return count;
1950}
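/*
 * Illustrative sketch (not part of the upstream file): the cleanup loop
 * above walks the ring from dirty_tx towards cur_tx, advancing with
 * STMMAC_GET_ENTRY(), which (as its use here implies) increments the
 * index and wraps it at DMA_TX_SIZE.  Counting the in-flight entries the
 * same way:
 *
 *	static unsigned int tx_entries_in_flight(struct stmmac_tx_queue *tx_q)
 *	{
 *		unsigned int entry = tx_q->dirty_tx;
 *		unsigned int count = 0;
 *
 *		while (entry != tx_q->cur_tx) {
 *			count++;
 *			entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 *		}
 *		return count;
 *	}
 *
 * tx_entries_in_flight() is hypothetical; the driver instead queries
 * stmmac_tx_avail() (free entries) from the xmit and restart paths.
 */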
1951
1952/**
1953 * stmmac_tx_err - to manage the tx error
1954 * @priv: driver private structure
1955 * @chan: channel index
1956 * Description: it cleans the descriptors and restarts the transmission
1957 * in case of transmission errors.
1958 */
1959static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1960{
1961        struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1962        int i;
1963
1964        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1965
1966        stmmac_stop_tx_dma(priv, chan);
1967        dma_free_tx_skbufs(priv, chan);
1968        for (i = 0; i < DMA_TX_SIZE; i++)
1969                if (priv->extend_desc)
1970                        stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1971                                        priv->mode, (i == DMA_TX_SIZE - 1));
1972                else
1973                        stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1974                                        priv->mode, (i == DMA_TX_SIZE - 1));
1975        tx_q->dirty_tx = 0;
1976        tx_q->cur_tx = 0;
1977        tx_q->mss = 0;
1978        netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1979        stmmac_start_tx_dma(priv, chan);
1980
1981        priv->dev->stats.tx_errors++;
1982        netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1983}
1984
1985/**
1986 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1987 *  @priv: driver private structure
1988 *  @txmode: TX operating mode
1989 *  @rxmode: RX operating mode
1990 *  @chan: channel index
1991 *  Description: it is used for configuring the DMA operation mode at
1992 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1993 *  mode.
1994 */
1995static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1996                                          u32 rxmode, u32 chan)
1997{
1998        u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1999        u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2000        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2001        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2002        int rxfifosz = priv->plat->rx_fifo_size;
2003        int txfifosz = priv->plat->tx_fifo_size;
2004
2005        if (rxfifosz == 0)
2006                rxfifosz = priv->dma_cap.rx_fifo_size;
2007        if (txfifosz == 0)
2008                txfifosz = priv->dma_cap.tx_fifo_size;
2009
2010        /* Adjust for real per queue fifo size */
2011        rxfifosz /= rx_channels_count;
2012        txfifosz /= tx_channels_count;
2013
2014        stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2015        stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2016}
2017
2018static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2019{
2020        int ret;
2021
2022        ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2023                        priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2024        if (ret && (ret != -EINVAL)) {
2025                stmmac_global_err(priv);
2026                return true;
2027        }
2028
2029        return false;
2030}
2031
2032static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2033{
2034        int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2035                                                 &priv->xstats, chan);
2036        struct stmmac_channel *ch = &priv->channel[chan];
2037        bool needs_work = false;
2038
2039        if ((status & handle_rx) && ch->has_rx) {
2040                needs_work = true;
2041        } else {
2042                status &= ~handle_rx;
2043        }
2044
2045        if ((status & handle_tx) && ch->has_tx) {
2046                needs_work = true;
2047        } else {
2048                status &= ~handle_tx;
2049        }
2050
2051        if (needs_work && napi_schedule_prep(&ch->napi)) {
2052                stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2053                __napi_schedule(&ch->napi);
2054        }
2055
2056        return status;
2057}
2058
2059/**
2060 * stmmac_dma_interrupt - DMA ISR
2061 * @priv: driver private structure
2062 * Description: this is the DMA ISR. It is called by the main ISR.
2063 * It calls the dwmac DMA routine and schedules the poll method in case
2064 * some work can be done.
2065 */
2066static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2067{
2068        u32 tx_channel_count = priv->plat->tx_queues_to_use;
2069        u32 rx_channel_count = priv->plat->rx_queues_to_use;
2070        u32 channels_to_check = tx_channel_count > rx_channel_count ?
2071                                tx_channel_count : rx_channel_count;
2072        u32 chan;
2073        int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2074
2075        /* Make sure we never check beyond our status buffer. */
2076        if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2077                channels_to_check = ARRAY_SIZE(status);
2078
2079        for (chan = 0; chan < channels_to_check; chan++)
2080                status[chan] = stmmac_napi_check(priv, chan);
2081
2082        for (chan = 0; chan < tx_channel_count; chan++) {
2083                if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2084                        /* Try to bump up the dma threshold on this failure */
2085                        if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2086                            (tc <= 256)) {
2087                                tc += 64;
2088                                if (priv->plat->force_thresh_dma_mode)
2089                                        stmmac_set_dma_operation_mode(priv,
2090                                                                      tc,
2091                                                                      tc,
2092                                                                      chan);
2093                                else
2094                                        stmmac_set_dma_operation_mode(priv,
2095                                                                    tc,
2096                                                                    SF_DMA_MODE,
2097                                                                    chan);
2098                                priv->xstats.threshold = tc;
2099                        }
2100                } else if (unlikely(status[chan] == tx_hard_error)) {
2101                        stmmac_tx_err(priv, chan);
2102                }
2103        }
2104}
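/*
 * Illustrative sketch (not part of the upstream file): on a
 * tx_hard_error_bump_tc event the code above raises the DMA threshold
 * value tc in steps of 64 while the current value is still <= 256, so it
 * can reach at most 320:
 *
 *	static int next_tx_threshold(int cur_tc)
 *	{
 *		return (cur_tc <= 256) ? cur_tc + 64 : cur_tc;
 *	}
 *
 * next_tx_threshold() is hypothetical; the real code also re-programs the
 * DMA operation mode for the channel and records the new value in
 * xstats.threshold.
 */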
2105
2106/**
2107 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2108 * @priv: driver private structure
2109 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2110 */
2111static void stmmac_mmc_setup(struct stmmac_priv *priv)
2112{
2113        unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2114                            MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2115
2116        dwmac_mmc_intr_all_mask(priv->mmcaddr);
2117
2118        if (priv->dma_cap.rmon) {
2119                dwmac_mmc_ctrl(priv->mmcaddr, mode);
2120                memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2121        } else
2122                netdev_info(priv->dev, "No MAC Management Counters available\n");
2123}
2124
2125/**
2126 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2127 * @priv: driver private structure
2128 * Description:
2129 *  newer GMAC chip generations have a register to indicate the
2130 *  presence of the optional features/functions.
2131 *  This can also be used to override the value passed through the
2132 *  platform, which is necessary for old MAC10/100 and GMAC chips.
2133 */
2134static int stmmac_get_hw_features(struct stmmac_priv *priv)
2135{
2136        return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2137}
2138
2139/**
2140 * stmmac_check_ether_addr - check if the MAC addr is valid
2141 * @priv: driver private structure
2142 * Description:
2143 * it verifies whether the MAC address is valid; in case it is not, it
2144 * generates a random MAC address.
2145 */
2146static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2147{
2148        if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2149                stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2150                if (!is_valid_ether_addr(priv->dev->dev_addr))
2151                        eth_hw_addr_random(priv->dev);
2152                netdev_info(priv->dev, "device MAC address %pM\n",
2153                            priv->dev->dev_addr);
2154        }
2155}
2156
2157/**
2158 * stmmac_init_dma_engine - DMA init.
2159 * @priv: driver private structure
2160 * Description:
2161 * It initializes the DMA by invoking the specific MAC/GMAC callback.
2162 * Some DMA parameters can be passed from the platform;
2163 * in case these are not passed, a default is kept for the MAC or GMAC.
2164 */
2165static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2166{
2167        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2168        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2169        u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2170        struct stmmac_rx_queue *rx_q;
2171        struct stmmac_tx_queue *tx_q;
2172        u32 chan = 0;
2173        int atds = 0;
2174        int ret = 0;
2175
2176        if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2177                dev_err(priv->device, "Invalid DMA configuration\n");
2178                return -EINVAL;
2179        }
2180
2181        if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2182                atds = 1;
2183
2184        ret = stmmac_reset(priv, priv->ioaddr);
2185        if (ret) {
2186                dev_err(priv->device, "Failed to reset the dma\n");
2187                return ret;
2188        }
2189
2190        /* DMA Configuration */
2191        stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2192
2193        if (priv->plat->axi)
2194                stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2195
2196        /* DMA RX Channel Configuration */
2197        for (chan = 0; chan < rx_channels_count; chan++) {
2198                rx_q = &priv->rx_queue[chan];
2199
2200                stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2201                                    rx_q->dma_rx_phy, chan);
2202
2203                rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2204                            (DMA_RX_SIZE * sizeof(struct dma_desc));
2205                stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2206                                       rx_q->rx_tail_addr, chan);
2207        }
2208
2209        /* DMA TX Channel Configuration */
2210        for (chan = 0; chan < tx_channels_count; chan++) {
2211                tx_q = &priv->tx_queue[chan];
2212
2213                stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2214                                    tx_q->dma_tx_phy, chan);
2215
2216                tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2217                stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2218                                       tx_q->tx_tail_addr, chan);
2219        }
2220
2221        /* DMA CSR Channel configuration */
2222        for (chan = 0; chan < dma_csr_ch; chan++)
2223                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2224
2225        return ret;
2226}
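/*
 * Illustrative note (not part of the upstream file): the tail pointers
 * programmed above are plain bus addresses derived from the ring base:
 *
 *	// RX: point past the last descriptor so the DMA may use the whole ring
 *	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
 *			     DMA_RX_SIZE * sizeof(struct dma_desc);
 *	// TX: start at the ring base; it is advanced as frames are queued
 *	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
 *
 * The RX/TX comments reflect the usual DWMAC convention; the exact tail
 * semantics are defined by the stmmac_set_*_tail_ptr() callbacks.
 */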
2227
2228static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2229{
2230        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2231
2232        mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2233}
2234
2235/**
2236 * stmmac_tx_timer - mitigation sw timer for tx.
2237 * @t: pointer to the timer_list of the TX queue
2238 * Description:
2239 * This is the timer handler; it schedules the NAPI poll that performs the tx clean.
2240 */
2241static void stmmac_tx_timer(struct timer_list *t)
2242{
2243        struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2244        struct stmmac_priv *priv = tx_q->priv_data;
2245        struct stmmac_channel *ch;
2246
2247        ch = &priv->channel[tx_q->queue_index];
2248
2249        if (likely(napi_schedule_prep(&ch->napi)))
2250                __napi_schedule(&ch->napi);
2251}
2252
2253/**
2254 * stmmac_init_tx_coalesce - init tx mitigation options.
2255 * @priv: driver private structure
2256 * Description:
2257 * This inits the transmit coalesce parameters: i.e. timer rate,
2258 * timer handler and default threshold used for enabling the
2259 * interrupt on completion bit.
2260 */
2261static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2262{
2263        u32 tx_channel_count = priv->plat->tx_queues_to_use;
2264        u32 chan;
2265
2266        priv->tx_coal_frames = STMMAC_TX_FRAMES;
2267        priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2268
2269        for (chan = 0; chan < tx_channel_count; chan++) {
2270                struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2271
2272                timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2273        }
2274}
2275
2276static void stmmac_set_rings_length(struct stmmac_priv *priv)
2277{
2278        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2279        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2280        u32 chan;
2281
2282        /* set TX ring length */
2283        for (chan = 0; chan < tx_channels_count; chan++)
2284                stmmac_set_tx_ring_len(priv, priv->ioaddr,
2285                                (DMA_TX_SIZE - 1), chan);
2286
2287        /* set RX ring length */
2288        for (chan = 0; chan < rx_channels_count; chan++)
2289                stmmac_set_rx_ring_len(priv, priv->ioaddr,
2290                                (DMA_RX_SIZE - 1), chan);
2291}
2292
2293/**
2294 *  stmmac_set_tx_queue_weight - Set TX queue weight
2295 *  @priv: driver private structure
2296 *  Description: It is used for setting the TX queue weights
2297 */
2298static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2299{
2300        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2301        u32 weight;
2302        u32 queue;
2303
2304        for (queue = 0; queue < tx_queues_count; queue++) {
2305                weight = priv->plat->tx_queues_cfg[queue].weight;
2306                stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2307        }
2308}
2309
2310/**
2311 *  stmmac_configure_cbs - Configure CBS in TX queue
2312 *  @priv: driver private structure
2313 *  Description: It is used for configuring CBS in AVB TX queues
2314 */
2315static void stmmac_configure_cbs(struct stmmac_priv *priv)
2316{
2317        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2318        u32 mode_to_use;
2319        u32 queue;
2320
2321        /* queue 0 is reserved for legacy traffic */
2322        for (queue = 1; queue < tx_queues_count; queue++) {
2323                mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2324                if (mode_to_use == MTL_QUEUE_DCB)
2325                        continue;
2326
2327                stmmac_config_cbs(priv, priv->hw,
2328                                priv->plat->tx_queues_cfg[queue].send_slope,
2329                                priv->plat->tx_queues_cfg[queue].idle_slope,
2330                                priv->plat->tx_queues_cfg[queue].high_credit,
2331                                priv->plat->tx_queues_cfg[queue].low_credit,
2332                                queue);
2333        }
2334}
2335
2336/**
2337 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2338 *  @priv: driver private structure
2339 *  Description: It is used for mapping RX queues to RX dma channels
2340 */
2341static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2342{
2343        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2344        u32 queue;
2345        u32 chan;
2346
2347        for (queue = 0; queue < rx_queues_count; queue++) {
2348                chan = priv->plat->rx_queues_cfg[queue].chan;
2349                stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2350        }
2351}
2352
2353/**
2354 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2355 *  @priv: driver private structure
2356 *  Description: It is used for configuring the RX Queue Priority
2357 */
2358static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2359{
2360        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2361        u32 queue;
2362        u32 prio;
2363
2364        for (queue = 0; queue < rx_queues_count; queue++) {
2365                if (!priv->plat->rx_queues_cfg[queue].use_prio)
2366                        continue;
2367
2368                prio = priv->plat->rx_queues_cfg[queue].prio;
2369                stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2370        }
2371}
2372
2373/**
2374 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2375 *  @priv: driver private structure
2376 *  Description: It is used for configuring the TX Queue Priority
2377 */
2378static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2379{
2380        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2381        u32 queue;
2382        u32 prio;
2383
2384        for (queue = 0; queue < tx_queues_count; queue++) {
2385                if (!priv->plat->tx_queues_cfg[queue].use_prio)
2386                        continue;
2387
2388                prio = priv->plat->tx_queues_cfg[queue].prio;
2389                stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2390        }
2391}
2392
2393/**
2394 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2395 *  @priv: driver private structure
2396 *  Description: It is used for configuring the RX queue routing
2397 */
2398static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2399{
2400        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2401        u32 queue;
2402        u8 packet;
2403
2404        for (queue = 0; queue < rx_queues_count; queue++) {
2405                /* no specific packet type routing specified for the queue */
2406                if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2407                        continue;
2408
2409                packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2410                stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2411        }
2412}
2413
2414/**
2415 *  stmmac_mtl_configuration - Configure MTL
2416 *  @priv: driver private structure
2417 *  Description: It is used for configuring the MTL
2418 */
2419static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2420{
2421        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2422        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2423
2424        if (tx_queues_count > 1)
2425                stmmac_set_tx_queue_weight(priv);
2426
2427        /* Configure MTL RX algorithms */
2428        if (rx_queues_count > 1)
2429                stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2430                                priv->plat->rx_sched_algorithm);
2431
2432        /* Configure MTL TX algorithms */
2433        if (tx_queues_count > 1)
2434                stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2435                                priv->plat->tx_sched_algorithm);
2436
2437        /* Configure CBS in AVB TX queues */
2438        if (tx_queues_count > 1)
2439                stmmac_configure_cbs(priv);
2440
2441        /* Map RX MTL to DMA channels */
2442        stmmac_rx_queue_dma_chan_map(priv);
2443
2444        /* Enable MAC RX Queues */
2445        stmmac_mac_enable_rx_queues(priv);
2446
2447        /* Set RX priorities */
2448        if (rx_queues_count > 1)
2449                stmmac_mac_config_rx_queues_prio(priv);
2450
2451        /* Set TX priorities */
2452        if (tx_queues_count > 1)
2453                stmmac_mac_config_tx_queues_prio(priv);
2454
2455        /* Set RX routing */
2456        if (rx_queues_count > 1)
2457                stmmac_mac_config_rx_queues_routing(priv);
2458}
2459
2460static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2461{
2462        if (priv->dma_cap.asp) {
2463                netdev_info(priv->dev, "Enabling Safety Features\n");
2464                stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2465        } else {
2466                netdev_info(priv->dev, "No Safety Features support found\n");
2467        }
2468}
2469
2470/**
2471 * stmmac_hw_setup - setup mac in a usable state.
2472 *  @dev : pointer to the device structure.
2473 *  Description:
2474 *  this is the main function used to set up the HW in a usable state: the
2475 *  dma engine is reset, the core registers are configured (e.g. AXI,
2476 *  Checksum features, timers) and the DMA is made ready to start receiving
2477 *  and transmitting.
2478 *  Return value:
2479 *  0 on success and an appropriate (-)ve integer as defined in errno.h
2480 *  file on failure.
2481 */
2482static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2483{
2484        struct stmmac_priv *priv = netdev_priv(dev);
2485        u32 rx_cnt = priv->plat->rx_queues_to_use;
2486        u32 tx_cnt = priv->plat->tx_queues_to_use;
2487        u32 chan;
2488        int ret;
2489
2490        /* DMA initialization and SW reset */
2491        ret = stmmac_init_dma_engine(priv);
2492        if (ret < 0) {
2493                netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2494                           __func__);
2495                return ret;
2496        }
2497
2498        /* Copy the MAC addr into the HW  */
2499        stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2500
2501        /* PS and related bits will be programmed according to the speed */
2502        if (priv->hw->pcs) {
2503                int speed = priv->plat->mac_port_sel_speed;
2504
2505                if ((speed == SPEED_10) || (speed == SPEED_100) ||
2506                    (speed == SPEED_1000)) {
2507                        priv->hw->ps = speed;
2508                } else {
2509                        dev_warn(priv->device, "invalid port speed\n");
2510                        priv->hw->ps = 0;
2511                }
2512        }
2513
2514        /* Initialize the MAC Core */
2515        stmmac_core_init(priv, priv->hw, dev);
2516
2517        /* Initialize MTL */
2518        stmmac_mtl_configuration(priv);
2519
2520        /* Initialize Safety Features */
2521        stmmac_safety_feat_configuration(priv);
2522
2523        ret = stmmac_rx_ipc(priv, priv->hw);
2524        if (!ret) {
2525                netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2526                priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2527                priv->hw->rx_csum = 0;
2528        }
2529
2530        /* Enable the MAC Rx/Tx */
2531        stmmac_mac_set(priv, priv->ioaddr, true);
2532
2533        /* Set the HW DMA mode and the COE */
2534        stmmac_dma_operation_mode(priv);
2535
2536        stmmac_mmc_setup(priv);
2537
2538        if (init_ptp) {
2539                ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2540                if (ret < 0)
2541                        netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2542
2543                ret = stmmac_init_ptp(priv);
2544                if (ret == -EOPNOTSUPP)
2545                        netdev_warn(priv->dev, "PTP not supported by HW\n");
2546                else if (ret)
2547                        netdev_warn(priv->dev, "PTP init failed\n");
2548        }
2549
2550#ifdef CONFIG_DEBUG_FS
2551        ret = stmmac_init_fs(dev);
2552        if (ret < 0)
2553                netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2554                            __func__);
2555#endif
2556        priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2557
2558        if (priv->use_riwt) {
2559                ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2560                if (!ret)
2561                        priv->rx_riwt = MAX_DMA_RIWT;
2562        }
2563
2564        if (priv->hw->pcs)
2565                stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
2566
2567        /* set TX and RX rings length */
2568        stmmac_set_rings_length(priv);
2569
2570        /* Enable TSO */
2571        if (priv->tso) {
2572                for (chan = 0; chan < tx_cnt; chan++)
2573                        stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2574        }
2575
2576        /* Start the ball rolling... */
2577        stmmac_start_all_dma(priv);
2578
2579        return 0;
2580}
2581
2582static void stmmac_hw_teardown(struct net_device *dev)
2583{
2584        struct stmmac_priv *priv = netdev_priv(dev);
2585
2586        clk_disable_unprepare(priv->plat->clk_ptp_ref);
2587}
2588
2589/**
2590 *  stmmac_open - open entry point of the driver
2591 *  @dev : pointer to the device structure.
2592 *  Description:
2593 *  This function is the open entry point of the driver.
2594 *  Return value:
2595 *  0 on success and an appropriate (-)ve integer as defined in errno.h
2596 *  file on failure.
2597 */
2598static int stmmac_open(struct net_device *dev)
2599{
2600        struct stmmac_priv *priv = netdev_priv(dev);
2601        u32 chan;
2602        int ret;
2603
2604        stmmac_check_ether_addr(priv);
2605
2606        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2607            priv->hw->pcs != STMMAC_PCS_TBI &&
2608            priv->hw->pcs != STMMAC_PCS_RTBI) {
2609                ret = stmmac_init_phy(dev);
2610                if (ret) {
2611                        netdev_err(priv->dev,
2612                                   "%s: Cannot attach to PHY (error: %d)\n",
2613                                   __func__, ret);
2614                        return ret;
2615                }
2616        }
2617
2618        /* Extra statistics */
2619        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2620        priv->xstats.threshold = tc;
2621
2622        priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2623        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2624
2625        ret = alloc_dma_desc_resources(priv);
2626        if (ret < 0) {
2627                netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2628                           __func__);
2629                goto dma_desc_error;
2630        }
2631
2632        ret = init_dma_desc_rings(dev, GFP_KERNEL);
2633        if (ret < 0) {
2634                netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2635                           __func__);
2636                goto init_error;
2637        }
2638
2639        ret = stmmac_hw_setup(dev, true);
2640        if (ret < 0) {
2641                netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2642                goto init_error;
2643        }
2644
2645        stmmac_init_tx_coalesce(priv);
2646
2647        if (dev->phydev)
2648                phy_start(dev->phydev);
2649
2650        /* Request the IRQ lines */
2651        ret = request_irq(dev->irq, stmmac_interrupt,
2652                          IRQF_SHARED, dev->name, dev);
2653        if (unlikely(ret < 0)) {
2654                netdev_err(priv->dev,
2655                           "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2656                           __func__, dev->irq, ret);
2657                goto irq_error;
2658        }
2659
2660        /* Request the Wake IRQ in case another line is used for WoL */
2661        if (priv->wol_irq != dev->irq) {
2662                ret = request_irq(priv->wol_irq, stmmac_interrupt,
2663                                  IRQF_SHARED, dev->name, dev);
2664                if (unlikely(ret < 0)) {
2665                        netdev_err(priv->dev,
2666                                   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2667                                   __func__, priv->wol_irq, ret);
2668                        goto wolirq_error;
2669                }
2670        }
2671
2672        /* Request the LPI IRQ in case another line is used */
2673        if (priv->lpi_irq > 0) {
2674                ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2675                                  dev->name, dev);
2676                if (unlikely(ret < 0)) {
2677                        netdev_err(priv->dev,
2678                                   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2679                                   __func__, priv->lpi_irq, ret);
2680                        goto lpiirq_error;
2681                }
2682        }
2683
2684        stmmac_enable_all_queues(priv);
2685        stmmac_start_all_queues(priv);
2686
2687        return 0;
2688
2689lpiirq_error:
2690        if (priv->wol_irq != dev->irq)
2691                free_irq(priv->wol_irq, dev);
2692wolirq_error:
2693        free_irq(dev->irq, dev);
2694irq_error:
2695        if (dev->phydev)
2696                phy_stop(dev->phydev);
2697
2698        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2699                del_timer_sync(&priv->tx_queue[chan].txtimer);
2700
2701        stmmac_hw_teardown(dev);
2702init_error:
2703        free_dma_desc_resources(priv);
2704dma_desc_error:
2705        if (dev->phydev)
2706                phy_disconnect(dev->phydev);
2707
2708        return ret;
2709}
2710
2711/**
2712 *  stmmac_release - close entry point of the driver
2713 *  @dev : device pointer.
2714 *  Description:
2715 *  This is the stop entry point of the driver.
2716 */
2717static int stmmac_release(struct net_device *dev)
2718{
2719        struct stmmac_priv *priv = netdev_priv(dev);
2720        u32 chan;
2721
2722        if (priv->eee_enabled)
2723                del_timer_sync(&priv->eee_ctrl_timer);
2724
2725        /* Stop and disconnect the PHY */
2726        if (dev->phydev) {
2727                phy_stop(dev->phydev);
2728                phy_disconnect(dev->phydev);
2729        }
2730
2731        stmmac_stop_all_queues(priv);
2732
2733        stmmac_disable_all_queues(priv);
2734
2735        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2736                del_timer_sync(&priv->tx_queue[chan].txtimer);
2737
2738        /* Free the IRQ lines */
2739        free_irq(dev->irq, dev);
2740        if (priv->wol_irq != dev->irq)
2741                free_irq(priv->wol_irq, dev);
2742        if (priv->lpi_irq > 0)
2743                free_irq(priv->lpi_irq, dev);
2744
2745        /* Stop TX/RX DMA and clear the descriptors */
2746        stmmac_stop_all_dma(priv);
2747
2748        /* Release and free the Rx/Tx resources */
2749        free_dma_desc_resources(priv);
2750
2751        /* Disable the MAC Rx/Tx */
2752        stmmac_mac_set(priv, priv->ioaddr, false);
2753
2754        netif_carrier_off(dev);
2755
2756#ifdef CONFIG_DEBUG_FS
2757        stmmac_exit_fs(dev);
2758#endif
2759
2760        stmmac_release_ptp(priv);
2761
2762        return 0;
2763}
2764
2765/**
2766 *  stmmac_tso_allocator - allocate and fill the TSO TX descriptors
2767 *  @priv: driver private structure
2768 *  @des: buffer start address
2769 *  @total_len: total length to fill in descriptors
2770 *  @last_segment: condition for the last descriptor
2771 *  @queue: TX queue index
2772 *  Description:
2773 *  This function fills descriptors and requests new descriptors according
2774 *  to the buffer length to fill.
2775 */
2776static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2777                                 int total_len, bool last_segment, u32 queue)
2778{
2779        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2780        struct dma_desc *desc;
2781        u32 buff_size;
2782        int tmp_len;
2783
2784        tmp_len = total_len;
2785
2786        while (tmp_len > 0) {
2787                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2788                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2789                desc = tx_q->dma_tx + tx_q->cur_tx;
2790
2791                desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2792                buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2793                            TSO_MAX_BUFF_SIZE : tmp_len;
2794
2795                stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2796                                0, 1,
2797                                (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2798                                0, 0);
2799
2800                tmp_len -= TSO_MAX_BUFF_SIZE;
2801        }
2802}
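/*
 * Illustrative sketch (not part of the upstream file): because each
 * descriptor buffer can carry at most TSO_MAX_BUFF_SIZE bytes, a payload
 * of payload_len bytes consumes roughly payload_len / TSO_MAX_BUFF_SIZE + 1
 * descriptors, which is exactly the bound stmmac_tso_xmit() checks against
 * stmmac_tx_avail() before queueing a frame:
 *
 *	static int tso_desc_needed(unsigned int payload_len)
 *	{
 *		// one descriptor per TSO_MAX_BUFF_SIZE chunk, plus the first
 *		// descriptor that also carries the L2/L3/L4 headers
 *		return payload_len / TSO_MAX_BUFF_SIZE + 1;
 *	}
 *
 * tso_desc_needed() is hypothetical and ignores the optional MSS context
 * descriptor and per-fragment rounding.
 */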
2803
2804/**
2805 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2806 *  @skb : the socket buffer
2807 *  @dev : device pointer
2808 *  Description: this is the transmit function that is called on TSO frames
2809 *  (support available on GMAC4 and newer chips).
2810 *  The diagram below shows the ring programming in case of TSO frames:
2811 *
2812 *  First Descriptor
2813 *   --------
2814 *   | DES0 |---> buffer1 = L2/L3/L4 header
2815 *   | DES1 |---> TCP Payload (can continue on next descr...)
2816 *   | DES2 |---> buffer 1 and 2 len
2817 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2818 *   --------
2819 *      |
2820 *     ...
2821 *      |
2822 *   --------
2823 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
2824 *   | DES1 | --|
2825 *   | DES2 | --> buffer 1 and 2 len
2826 *   | DES3 |
2827 *   --------
2828 *
2829 * The MSS is fixed while TSO is in use, so the TDES3 ctx field is only programmed when the MSS changes.
2830 */
2831static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2832{
2833        struct dma_desc *desc, *first, *mss_desc = NULL;
2834        struct stmmac_priv *priv = netdev_priv(dev);
2835        int nfrags = skb_shinfo(skb)->nr_frags;
2836        u32 queue = skb_get_queue_mapping(skb);
2837        unsigned int first_entry, des;
2838        struct stmmac_tx_queue *tx_q;
2839        int tmp_pay_len = 0;
2840        u32 pay_len, mss;
2841        u8 proto_hdr_len;
2842        int i;
2843
2844        tx_q = &priv->tx_queue[queue];
2845
2846        /* Compute header lengths */
2847        proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2848
2849        /* Desc availability based on threshold should be safe enough */
2850        if (unlikely(stmmac_tx_avail(priv, queue) <
2851                (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2852                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2853                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2854                                                                queue));
2855                        /* This is a hard error, log it. */
2856                        netdev_err(priv->dev,
2857                                   "%s: Tx Ring full when queue awake\n",
2858                                   __func__);
2859                }
2860                return NETDEV_TX_BUSY;
2861        }
2862
2863        pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2864
2865        mss = skb_shinfo(skb)->gso_size;
2866
2867        /* set new MSS value if needed */
2868        if (mss != tx_q->mss) {
2869                mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2870                stmmac_set_mss(priv, mss_desc, mss);
2871                tx_q->mss = mss;
2872                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2873                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
2874        }
2875
2876        if (netif_msg_tx_queued(priv)) {
2877                pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2878                        __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2879                pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2880                        skb->data_len);
2881        }
2882
2883        first_entry = tx_q->cur_tx;
2884        WARN_ON(tx_q->tx_skbuff[first_entry]);
2885
2886        desc = tx_q->dma_tx + first_entry;
2887        first = desc;
2888
2889        /* first descriptor: fill Headers on Buf1 */
2890        des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2891                             DMA_TO_DEVICE);
2892        if (dma_mapping_error(priv->device, des))
2893                goto dma_map_err;
2894
2895        tx_q->tx_skbuff_dma[first_entry].buf = des;
2896        tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2897
2898        first->des0 = cpu_to_le32(des);
2899
2900        /* Fill start of payload in buff2 of first descriptor */
2901        if (pay_len)
2902                first->des1 = cpu_to_le32(des + proto_hdr_len);
2903
2904        /* If needed take extra descriptors to fill the remaining payload */
2905        tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2906
2907        stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2908
2909        /* Prepare fragments */
2910        for (i = 0; i < nfrags; i++) {
2911                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2912
2913                des = skb_frag_dma_map(priv->device, frag, 0,
2914                                       skb_frag_size(frag),
2915                                       DMA_TO_DEVICE);
2916                if (dma_mapping_error(priv->device, des))
2917                        goto dma_map_err;
2918
2919                stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2920                                     (i == nfrags - 1), queue);
2921
2922                tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2923                tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2924                tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2925        }
2926
2927        tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2928
2929        /* Only the last descriptor gets to point to the skb. */
2930        tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2931
2932        /* We've used all descriptors we need for this skb, however,
2933         * advance cur_tx so that it references a fresh descriptor.
2934         * ndo_start_xmit will fill this descriptor the next time it's
2935         * called and stmmac_tx_clean may clean up to this descriptor.
2936         */
2937        tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2938
2939        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2940                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2941                          __func__);
2942                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2943        }
2944
2945        dev->stats.tx_bytes += skb->len;
2946        priv->xstats.tx_tso_frames++;
2947        priv->xstats.tx_tso_nfrags += nfrags;
2948
2949        /* Manage tx mitigation */
2950        tx_q->tx_count_frames += nfrags + 1;
2951        if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
2952                stmmac_set_tx_ic(priv, desc);
2953                priv->xstats.tx_set_ic_bit++;
2954                tx_q->tx_count_frames = 0;
2955        } else {
2956                stmmac_tx_timer_arm(priv, queue);
2957        }
2958
2959        skb_tx_timestamp(skb);
2960
2961        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2962                     priv->hwts_tx_en)) {
2963                /* declare that device is doing timestamping */
2964                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2965                stmmac_enable_tx_timestamp(priv, first);
2966        }
2967
2968        /* Complete the first descriptor before granting the DMA */
2969        stmmac_prepare_tso_tx_desc(priv, first, 1,
2970                        proto_hdr_len,
2971                        pay_len,
2972                        1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2973                        tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2974
2975        /* If context desc is used to change MSS */
2976        if (mss_desc) {
2977                /* Make sure that first descriptor has been completely
2978                 * written, including its own bit. This is because MSS is
2979                 * actually before first descriptor, so we need to make
2980                 * sure that MSS's own bit is the last thing written.
2981                 */
2982                dma_wmb();
2983                stmmac_set_tx_owner(priv, mss_desc);
2984        }
2985
2986        /* The own bit must be the last setting done when preparing the
2987         * descriptor, and a barrier is then needed to make sure that
2988         * everything is coherent before granting the DMA engine.
2989         */
2990        wmb();
2991
2992        if (netif_msg_pktdata(priv)) {
2993                pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2994                        __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2995                        tx_q->cur_tx, first, nfrags);
2996
2997                stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
2998
2999                pr_info(">>> frame to be transmitted: ");
3000                print_pkt(skb->data, skb_headlen(skb));
3001        }
3002
3003        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3004
3005        tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3006        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3007
3008        return NETDEV_TX_OK;
3009
3010dma_map_err:
3011        dev_err(priv->device, "Tx dma map failed\n");
3012        dev_kfree_skb(skb);
3013        priv->dev->stats.tx_dropped++;
3014        return NETDEV_TX_OK;
3015}
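
/*
 * Illustrative note (not part of the driver): in stmmac_tso_xmit() the
 * linear part of the skb is mapped once; DES0 of the first descriptor
 * points at the L2/L3/L4 headers and, when there is linear payload, DES1
 * points proto_hdr_len bytes further into the same mapping. For a
 * hypothetical skb with proto_hdr_len = 66 and a 1000 byte linear area,
 * pay_len is 934, tmp_pay_len (934 - TSO_MAX_BUFF_SIZE) is negative, so
 * stmmac_tso_allocator() takes no extra descriptors for the linear data
 * and any page fragments follow in their own descriptors.
 */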
3016
3017/**
3018 *  stmmac_xmit - Tx entry point of the driver
3019 *  @skb : the socket buffer
3020 *  @dev : device pointer
3021 *  Description : this is the tx entry point of the driver.
3022 *  It programs the chain or the ring and supports oversized frames
3023 *  and the SG feature.
3024 */
3025static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3026{
3027        struct stmmac_priv *priv = netdev_priv(dev);
3028        unsigned int nopaged_len = skb_headlen(skb);
3029        int i, csum_insertion = 0, is_jumbo = 0;
3030        u32 queue = skb_get_queue_mapping(skb);
3031        int nfrags = skb_shinfo(skb)->nr_frags;
3032        int entry;
3033        unsigned int first_entry;
3034        struct dma_desc *desc, *first;
3035        struct stmmac_tx_queue *tx_q;
3036        unsigned int enh_desc;
3037        unsigned int des;
3038
3039        tx_q = &priv->tx_queue[queue];
3040
3041        /* Manage oversized TCP frames for GMAC4 device */
3042        if (skb_is_gso(skb) && priv->tso) {
3043                if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3044                        return stmmac_tso_xmit(skb, dev);
3045        }
3046
3047        if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3048                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3049                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3050                                                                queue));
3051                        /* This is a hard error, log it. */
3052                        netdev_err(priv->dev,
3053                                   "%s: Tx Ring full when queue awake\n",
3054                                   __func__);
3055                }
3056                return NETDEV_TX_BUSY;
3057        }
3058
3059        if (priv->tx_path_in_lpi_mode)
3060                stmmac_disable_eee_mode(priv);
3061
3062        entry = tx_q->cur_tx;
3063        first_entry = entry;
3064        WARN_ON(tx_q->tx_skbuff[first_entry]);
3065
3066        csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3067
3068        if (likely(priv->extend_desc))
3069                desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3070        else
3071                desc = tx_q->dma_tx + entry;
3072
3073        first = desc;
3074
3075        enh_desc = priv->plat->enh_desc;
3076        /* To program the descriptors according to the size of the frame */
3077        if (enh_desc)
3078                is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3079
3080        if (unlikely(is_jumbo)) {
3081                entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3082                if (unlikely(entry < 0) && (entry != -EINVAL))
3083                        goto dma_map_err;
3084        }
3085
3086        for (i = 0; i < nfrags; i++) {
3087                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3088                int len = skb_frag_size(frag);
3089                bool last_segment = (i == (nfrags - 1));
3090
3091                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3092                WARN_ON(tx_q->tx_skbuff[entry]);
3093
3094                if (likely(priv->extend_desc))
3095                        desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3096                else
3097                        desc = tx_q->dma_tx + entry;
3098
3099                des = skb_frag_dma_map(priv->device, frag, 0, len,
3100                                       DMA_TO_DEVICE);
3101                if (dma_mapping_error(priv->device, des))
3102                        goto dma_map_err; /* should reuse desc w/o issues */
3103
3104                tx_q->tx_skbuff_dma[entry].buf = des;
3105
3106                stmmac_set_desc_addr(priv, desc, des);
3107
3108                tx_q->tx_skbuff_dma[entry].map_as_page = true;
3109                tx_q->tx_skbuff_dma[entry].len = len;
3110                tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3111
3112                /* Prepare the descriptor and set the own bit too */
3113                stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3114                                priv->mode, 1, last_segment, skb->len);
3115        }
3116
3117        /* Only the last descriptor gets to point to the skb. */
3118        tx_q->tx_skbuff[entry] = skb;
3119
3120        /* We've used all descriptors we need for this skb, however,
3121         * advance cur_tx so that it references a fresh descriptor.
3122         * ndo_start_xmit will fill this descriptor the next time it's
3123         * called and stmmac_tx_clean may clean up to this descriptor.
3124         */
3125        entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3126        tx_q->cur_tx = entry;
3127
3128        if (netif_msg_pktdata(priv)) {
3129                void *tx_head;
3130
3131                netdev_dbg(priv->dev,
3132                           "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3133                           __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3134                           entry, first, nfrags);
3135
3136                if (priv->extend_desc)
3137                        tx_head = (void *)tx_q->dma_etx;
3138                else
3139                        tx_head = (void *)tx_q->dma_tx;
3140
3141                stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
3142
3143                netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3144                print_pkt(skb->data, skb->len);
3145        }
3146
3147        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3148                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3149                          __func__);
3150                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3151        }
3152
3153        dev->stats.tx_bytes += skb->len;
3154
3155        /* According to the coalesce parameter, the IC bit for the latest
3156         * segment is reset and the timer is restarted to clean the tx status.
3157         * This approach takes care of the fragments: desc is the first
3158         * element in the no-SG case.
3159         */
3160        tx_q->tx_count_frames += nfrags + 1;
3161        if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
3162                stmmac_set_tx_ic(priv, desc);
3163                priv->xstats.tx_set_ic_bit++;
3164                tx_q->tx_count_frames = 0;
3165        } else {
3166                stmmac_tx_timer_arm(priv, queue);
3167        }
3168
3169        skb_tx_timestamp(skb);
3170
3171        /* Ready to fill the first descriptor and set the OWN bit w/o any
3172         * problems because all the descriptors are actually ready to be
3173         * passed to the DMA engine.
3174         */
3175        if (likely(!is_jumbo)) {
3176                bool last_segment = (nfrags == 0);
3177
3178                des = dma_map_single(priv->device, skb->data,
3179                                     nopaged_len, DMA_TO_DEVICE);
3180                if (dma_mapping_error(priv->device, des))
3181                        goto dma_map_err;
3182
3183                tx_q->tx_skbuff_dma[first_entry].buf = des;
3184
3185                stmmac_set_desc_addr(priv, first, des);
3186
3187                tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3188                tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3189
3190                if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3191                             priv->hwts_tx_en)) {
3192                        /* declare that device is doing timestamping */
3193                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3194                        stmmac_enable_tx_timestamp(priv, first);
3195                }
3196
3197                /* Prepare the first descriptor setting the OWN bit too */
3198                stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3199                                csum_insertion, priv->mode, 1, last_segment,
3200                                skb->len);
3201
3202                /* The own bit must be the last setting done when preparing the
3203                 * descriptor, and a barrier is then needed to make sure that
3204                 * everything is coherent before granting the DMA engine.
3205                 */
3206                wmb();
3207        }
3208
3209        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3210
3211        stmmac_enable_dma_transmission(priv, priv->ioaddr);
3212
3213        tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
3214        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3215
3216        return NETDEV_TX_OK;
3217
3218dma_map_err:
3219        netdev_err(priv->dev, "Tx DMA map failed\n");
3220        dev_kfree_skb(skb);
3221        priv->dev->stats.tx_dropped++;
3222        return NETDEV_TX_OK;
3223}
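
/*
 * Ordering note (informational): in both transmit paths the fragment
 * descriptors are prepared (OWN bit included) first, the first descriptor
 * is completed last, and a wmb() is issued before the DMA is kicked
 * (transmission enable / tail pointer write), so the DMA engine never
 * observes a half-built chain.
 */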
3224
3225static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3226{
3227        struct vlan_ethhdr *veth;
3228        __be16 vlan_proto;
3229        u16 vlanid;
3230
3231        veth = (struct vlan_ethhdr *)skb->data;
3232        vlan_proto = veth->h_vlan_proto;
3233
3234        if ((vlan_proto == htons(ETH_P_8021Q) &&
3235             dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3236            (vlan_proto == htons(ETH_P_8021AD) &&
3237             dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3238                /* pop the vlan tag */
3239                vlanid = ntohs(veth->h_vlan_TCI);
3240                memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3241                skb_pull(skb, VLAN_HLEN);
3242                __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3243        }
3244}
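
/*
 * Illustrative note (not part of the driver): the VLAN pop above moves the
 * two MAC addresses (ETH_ALEN * 2 = 12 bytes) forward by VLAN_HLEN (4
 * bytes) and then skb_pull() drops the gap, roughly:
 *
 *	before: | dst(6) | src(6) | TPID/TCI(4) | type | payload ...
 *	after:  |  gap(4)  | dst(6) | src(6)    | type | payload ...
 *
 * The extracted tag is kept only in the skb metadata via
 * __vlan_hwaccel_put_tag().
 */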
3245
3246
3247static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3248{
3249        if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3250                return 0;
3251
3252        return 1;
3253}
3254
3255/**
3256 * stmmac_rx_refill - refill used skb preallocated buffers
3257 * @priv: driver private structure
3258 * @queue: RX queue index
3259 * Description : this is to reallocate the skb for the reception process
3260 * that is based on zero-copy.
3261 */
3262static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3263{
3264        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3265        int dirty = stmmac_rx_dirty(priv, queue);
3266        unsigned int entry = rx_q->dirty_rx;
3267
3268        int bfsize = priv->dma_buf_sz;
3269
3270        while (dirty-- > 0) {
3271                struct dma_desc *p;
3272
3273                if (priv->extend_desc)
3274                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
3275                else
3276                        p = rx_q->dma_rx + entry;
3277
3278                if (likely(!rx_q->rx_skbuff[entry])) {
3279                        struct sk_buff *skb;
3280
3281                        skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3282                        if (unlikely(!skb)) {
3283                                /* so for a while no zero-copy! */
3284                                rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3285                                if (unlikely(net_ratelimit()))
3286                                        dev_err(priv->device,
3287                                                "fail to alloc skb entry %d\n",
3288                                                entry);
3289                                break;
3290                        }
3291
3292                        rx_q->rx_skbuff[entry] = skb;
3293                        rx_q->rx_skbuff_dma[entry] =
3294                            dma_map_single(priv->device, skb->data, bfsize,
3295                                           DMA_FROM_DEVICE);
3296                        if (dma_mapping_error(priv->device,
3297                                              rx_q->rx_skbuff_dma[entry])) {
3298                                netdev_err(priv->dev, "Rx DMA map failed\n");
3299                                dev_kfree_skb(skb);
3300                                break;
3301                        }
3302
3303                        stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
3304                        stmmac_refill_desc3(priv, rx_q, p);
3305
3306                        if (rx_q->rx_zeroc_thresh > 0)
3307                                rx_q->rx_zeroc_thresh--;
3308
3309                        netif_dbg(priv, rx_status, priv->dev,
3310                                  "refill entry #%d\n", entry);
3311                }
3312                dma_wmb();
3313
3314                stmmac_set_rx_owner(priv, p, priv->use_riwt);
3315
3316                dma_wmb();
3317
3318                entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3319        }
3320        rx_q->dirty_rx = entry;
3321}
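
/*
 * Illustrative note (not part of the driver): stmmac_rx_refill() walks from
 * dirty_rx towards cur_rx. If cur_rx has advanced to entry 10 while
 * dirty_rx is still 4, six entries are refilled; each one gets a fresh skb
 * (unless it still holds one), its DMA address written into the descriptor
 * and, after a dma_wmb(), its OWN bit handed back to the hardware.
 */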
3322
3323/**
3324 * stmmac_rx - manage the receive process
3325 * @priv: driver private structure
3326 * @limit: napi budget
3327 * @queue: RX queue index.
3328 * Description : this is the function called by the napi poll method.
3329 * It gets all the frames inside the ring.
3330 */
3331static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3332{
3333        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3334        struct stmmac_channel *ch = &priv->channel[queue];
3335        unsigned int entry = rx_q->cur_rx;
3336        int coe = priv->hw->rx_csum;
3337        unsigned int next_entry;
3338        unsigned int count = 0;
3339        bool xmac;
3340
3341        xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3342
3343        if (netif_msg_rx_status(priv)) {
3344                void *rx_head;
3345
3346                netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3347                if (priv->extend_desc)
3348                        rx_head = (void *)rx_q->dma_erx;
3349                else
3350                        rx_head = (void *)rx_q->dma_rx;
3351
3352                stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3353        }
3354        while (count < limit) {
3355                int status;
3356                struct dma_desc *p;
3357                struct dma_desc *np;
3358
3359                if (priv->extend_desc)
3360                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
3361                else
3362                        p = rx_q->dma_rx + entry;
3363
3364                /* read the status of the incoming frame */
3365                status = stmmac_rx_status(priv, &priv->dev->stats,
3366                                &priv->xstats, p);
3367                /* check if managed by the DMA otherwise go ahead */
3368                if (unlikely(status & dma_own))
3369                        break;
3370
3371                count++;
3372
3373                rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3374                next_entry = rx_q->cur_rx;
3375
3376                if (priv->extend_desc)
3377                        np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3378                else
3379                        np = rx_q->dma_rx + next_entry;
3380
3381                prefetch(np);
3382
3383                if (priv->extend_desc)
3384                        stmmac_rx_extended_status(priv, &priv->dev->stats,
3385                                        &priv->xstats, rx_q->dma_erx + entry);
3386                if (unlikely(status == discard_frame)) {
3387                        priv->dev->stats.rx_errors++;
3388                        if (priv->hwts_rx_en && !priv->extend_desc) {
3389                                /* DESC2 & DESC3 will be overwritten by the device
3390                                 * with the timestamp value, hence reinitialize
3391                                 * them in stmmac_rx_refill() so that the
3392                                 * device can reuse them.
3393                                 */
3394                                dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3395                                rx_q->rx_skbuff[entry] = NULL;
3396                                dma_unmap_single(priv->device,
3397                                                 rx_q->rx_skbuff_dma[entry],
3398                                                 priv->dma_buf_sz,
3399                                                 DMA_FROM_DEVICE);
3400                        }
3401                } else {
3402                        struct sk_buff *skb;
3403                        int frame_len;
3404                        unsigned int des;
3405
3406                        stmmac_get_desc_addr(priv, p, &des);
3407                        frame_len = stmmac_get_rx_frame_len(priv, p, coe);
3408
3409                        /*  If frame length is greater than skb buffer size
3410                         *  (preallocated during init) then the packet is
3411                         *  ignored
3412                         */
3413                        if (frame_len > priv->dma_buf_sz) {
3414                                netdev_err(priv->dev,
3415                                           "len %d larger than size (%d)\n",
3416                                           frame_len, priv->dma_buf_sz);
3417                                priv->dev->stats.rx_length_errors++;
3418                                break;
3419                        }
3420
3421                        /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
3422                         * Type frames (LLC/LLC-SNAP)
3423                         *
3424                         * llc_snap is never checked in GMAC >= 4, so this ACS
3425                         * feature is always disabled and packets need to be
3426                         * stripped manually.
3427                         */
3428                        if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3429                            unlikely(status != llc_snap))
3430                                frame_len -= ETH_FCS_LEN;
3431
3432                        if (netif_msg_rx_status(priv)) {
3433                                netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3434                                           p, entry, des);
3435                                netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3436                                           frame_len, status);
3437                        }
3438
3439                        /* Zero-copy is always used, for all sizes, in the
3440                         * GMAC4 case because the used descriptors always
3441                         * need to be refilled.
3442                         */
3443                        if (unlikely(!xmac &&
3444                                     ((frame_len < priv->rx_copybreak) ||
3445                                     stmmac_rx_threshold_count(rx_q)))) {
3446                                skb = netdev_alloc_skb_ip_align(priv->dev,
3447                                                                frame_len);
3448                                if (unlikely(!skb)) {
3449                                        if (net_ratelimit())
3450                                                dev_warn(priv->device,
3451                                                         "packet dropped\n");
3452                                        priv->dev->stats.rx_dropped++;
3453                                        break;
3454                                }
3455
3456                                dma_sync_single_for_cpu(priv->device,
3457                                                        rx_q->rx_skbuff_dma
3458                                                        [entry], frame_len,
3459                                                        DMA_FROM_DEVICE);
3460                                skb_copy_to_linear_data(skb,
3461                                                        rx_q->
3462                                                        rx_skbuff[entry]->data,
3463                                                        frame_len);
3464
3465                                skb_put(skb, frame_len);
3466                                dma_sync_single_for_device(priv->device,
3467                                                           rx_q->rx_skbuff_dma
3468                                                           [entry], frame_len,
3469                                                           DMA_FROM_DEVICE);
3470                        } else {
3471                                skb = rx_q->rx_skbuff[entry];
3472                                if (unlikely(!skb)) {
3473                                        netdev_err(priv->dev,
3474                                                   "%s: Inconsistent Rx chain\n",
3475                                                   priv->dev->name);
3476                                        priv->dev->stats.rx_dropped++;
3477                                        break;
3478                                }
3479                                prefetch(skb->data - NET_IP_ALIGN);
3480                                rx_q->rx_skbuff[entry] = NULL;
3481                                rx_q->rx_zeroc_thresh++;
3482
3483                                skb_put(skb, frame_len);
3484                                dma_unmap_single(priv->device,
3485                                                 rx_q->rx_skbuff_dma[entry],
3486                                                 priv->dma_buf_sz,
3487                                                 DMA_FROM_DEVICE);
3488                        }
3489
3490                        if (netif_msg_pktdata(priv)) {
3491                                netdev_dbg(priv->dev, "frame received (%dbytes)",
3492                                           frame_len);
3493                                print_pkt(skb->data, frame_len);
3494                        }
3495
3496                        stmmac_get_rx_hwtstamp(priv, p, np, skb);
3497
3498                        stmmac_rx_vlan(priv->dev, skb);
3499
3500                        skb->protocol = eth_type_trans(skb, priv->dev);
3501
3502                        if (unlikely(!coe))
3503                                skb_checksum_none_assert(skb);
3504                        else
3505                                skb->ip_summed = CHECKSUM_UNNECESSARY;
3506
3507                        napi_gro_receive(&ch->napi, skb);
3508
3509                        priv->dev->stats.rx_packets++;
3510                        priv->dev->stats.rx_bytes += frame_len;
3511                }
3512                entry = next_entry;
3513        }
3514
3515        stmmac_rx_refill(priv, queue);
3516
3517        priv->xstats.rx_pkt_n += count;
3518
3519        return count;
3520}
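
/*
 * Informational note: on the non-GMAC4/XGMAC path a received frame shorter
 * than priv->rx_copybreak is copied into a freshly allocated skb so the
 * preallocated buffer can stay mapped and be reused in place; larger
 * frames (and all frames on GMAC4/XGMAC) take the zero-copy branch, where
 * the preallocated skb itself is passed up and the ring entry is refilled
 * later by stmmac_rx_refill().
 */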
3521
3522/**
3523 *  stmmac_poll - stmmac poll method (NAPI)
3524 *  @napi : pointer to the napi structure.
3525 *  @budget : maximum number of packets that the current CPU can receive from
3526 *            all interfaces.
3527 *  Description :
3528 *  To look at the incoming frames and clear the tx resources.
3529 */
3530static int stmmac_napi_poll(struct napi_struct *napi, int budget)
3531{
3532        struct stmmac_channel *ch =
3533                container_of(napi, struct stmmac_channel, napi);
3534        struct stmmac_priv *priv = ch->priv_data;
3535        int work_done = 0, work_rem = budget;
3536        u32 chan = ch->index;
3537
3538        priv->xstats.napi_poll++;
3539
3540        if (ch->has_tx) {
3541                int done = stmmac_tx_clean(priv, work_rem, chan);
3542
3543                work_done += done;
3544                work_rem -= done;
3545        }
3546
3547        if (ch->has_rx) {
3548                int done = stmmac_rx(priv, work_rem, chan);
3549
3550                work_done += done;
3551                work_rem -= done;
3552        }
3553
3554        if (work_done < budget && napi_complete_done(napi, work_done))
3555                stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3556
3557        return work_done;
3558}
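
/*
 * Illustrative note (not part of the driver): the poll budget is shared
 * between directions. With budget = 64, if stmmac_tx_clean() completes 10
 * descriptors, stmmac_rx() then runs with a limit of 54; the channel's DMA
 * interrupt is re-enabled only when work_done < budget and
 * napi_complete_done() succeeds.
 */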
3559
3560/**
3561 *  stmmac_tx_timeout
3562 *  @dev : Pointer to net device structure
3563 *  Description: this function is called when a packet transmission fails to
3564 *   complete within a reasonable time. The driver will mark the error in the
3565 *   netdev structure and arrange for the device to be reset to a sane state
3566 *   in order to transmit a new packet.
3567 */
3568static void stmmac_tx_timeout(struct net_device *dev)
3569{
3570        struct stmmac_priv *priv = netdev_priv(dev);
3571
3572        stmmac_global_err(priv);
3573}
3574
3575/**
3576 *  stmmac_set_rx_mode - entry point for multicast addressing
3577 *  @dev : pointer to the device structure
3578 *  Description:
3579 *  This function is a driver entry point which gets called by the kernel
3580 *  whenever multicast addresses must be enabled/disabled.
3581 *  Return value:
3582 *  void.
3583 */
3584static void stmmac_set_rx_mode(struct net_device *dev)
3585{
3586        struct stmmac_priv *priv = netdev_priv(dev);
3587
3588        stmmac_set_filter(priv, priv->hw, dev);
3589}
3590
3591/**
3592 *  stmmac_change_mtu - entry point to change MTU size for the device.
3593 *  @dev : device pointer.
3594 *  @new_mtu : the new MTU size for the device.
3595 *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
3596 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
3597 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
3598 *  Return value:
3599 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3600 *  file on failure.
3601 */
3602static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3603{
3604        struct stmmac_priv *priv = netdev_priv(dev);
3605
3606        if (netif_running(dev)) {
3607                netdev_err(priv->dev, "must be stopped to change its MTU\n");
3608                return -EBUSY;
3609        }
3610
3611        dev->mtu = new_mtu;
3612
3613        netdev_update_features(dev);
3614
3615        return 0;
3616}
3617
3618static netdev_features_t stmmac_fix_features(struct net_device *dev,
3619                                             netdev_features_t features)
3620{
3621        struct stmmac_priv *priv = netdev_priv(dev);
3622
3623        if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3624                features &= ~NETIF_F_RXCSUM;
3625
3626        if (!priv->plat->tx_coe)
3627                features &= ~NETIF_F_CSUM_MASK;
3628
3629        /* Some GMAC devices have bugged Jumbo frame support that
3630         * needs the Tx COE disabled for oversized frames
3631         * (due to limited buffer sizes). In this case we disable
3632         * the TX csum insertion in the TDES and do not use SF.
3633         */
3634        if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3635                features &= ~NETIF_F_CSUM_MASK;
3636
3637        /* Disable tso if asked by ethtool */
3638        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3639                if (features & NETIF_F_TSO)
3640                        priv->tso = true;
3641                else
3642                        priv->tso = false;
3643        }
3644
3645        return features;
3646}
3647
3648static int stmmac_set_features(struct net_device *netdev,
3649                               netdev_features_t features)
3650{
3651        struct stmmac_priv *priv = netdev_priv(netdev);
3652
3653        /* Keep the COE Type in case csum is supported */
3654        if (features & NETIF_F_RXCSUM)
3655                priv->hw->rx_csum = priv->plat->rx_coe;
3656        else
3657                priv->hw->rx_csum = 0;
3658        /* No check is needed because rx_coe has been set before, and it will be
3659         * fixed if there is an issue.
3660         */
3661        stmmac_rx_ipc(priv, priv->hw);
3662
3663        return 0;
3664}
3665
3666/**
3667 *  stmmac_interrupt - main ISR
3668 *  @irq: interrupt number.
3669 *  @dev_id: to pass the net device pointer.
3670 *  Description: this is the main driver interrupt service routine.
3671 *  It can call:
3672 *  o DMA service routine (to manage incoming frame reception and transmission
3673 *    status)
3674 *  o Core interrupts to manage: remote wake-up, management counter, LPI
3675 *    interrupts.
3676 */
3677static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3678{
3679        struct net_device *dev = (struct net_device *)dev_id;
3680        struct stmmac_priv *priv = netdev_priv(dev);
3681        u32 rx_cnt = priv->plat->rx_queues_to_use;
3682        u32 tx_cnt = priv->plat->tx_queues_to_use;
3683        u32 queues_count;
3684        u32 queue;
3685        bool xmac;
3686
3687        xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3688        queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3689
3690        if (priv->irq_wake)
3691                pm_wakeup_event(priv->device, 0);
3692
3693        if (unlikely(!dev)) {
3694                netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3695                return IRQ_NONE;
3696        }
3697
3698        /* Check if adapter is up */
3699        if (test_bit(STMMAC_DOWN, &priv->state))
3700                return IRQ_HANDLED;
3701        /* Check if a fatal error happened */
3702        if (stmmac_safety_feat_interrupt(priv))
3703                return IRQ_HANDLED;
3704
3705        /* To handle the GMAC's own interrupts */
3706        if ((priv->plat->has_gmac) || xmac) {
3707                int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
3708                int mtl_status;
3709
3710                if (unlikely(status)) {
3711                        /* For LPI we need to save the tx status */
3712                        if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3713                                priv->tx_path_in_lpi_mode = true;
3714                        if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3715                                priv->tx_path_in_lpi_mode = false;
3716                }
3717
3718                for (queue = 0; queue < queues_count; queue++) {
3719                        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3720
3721                        mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3722                                                                queue);
3723                        if (mtl_status != -EINVAL)
3724                                status |= mtl_status;
3725
3726                        if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3727                                stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3728                                                       rx_q->rx_tail_addr,
3729                                                       queue);
3730                }
3731
3732                /* PCS link status */
3733                if (priv->hw->pcs) {
3734                        if (priv->xstats.pcs_link)
3735                                netif_carrier_on(dev);
3736                        else
3737                                netif_carrier_off(dev);
3738                }
3739        }
3740
3741        /* To handle DMA interrupts */
3742        stmmac_dma_interrupt(priv);
3743
3744        return IRQ_HANDLED;
3745}
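
/*
 * Informational note: queues_count above is the larger of the RX and TX
 * queue counts (e.g. 4 RX / 2 TX queues gives 4), so the MTL status of
 * every queue is polled; an RX FIFO overflow on a queue is recovered by
 * rewriting that queue's RX tail pointer.
 */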
3746
3747#ifdef CONFIG_NET_POLL_CONTROLLER
3748/* Polling receive - used by NETCONSOLE and other diagnostic tools
3749 * to allow network I/O with interrupts disabled.
3750 */
3751static void stmmac_poll_controller(struct net_device *dev)
3752{
3753        disable_irq(dev->irq);
3754        stmmac_interrupt(dev->irq, dev);
3755        enable_irq(dev->irq);
3756}
3757#endif
3758
3759/**
3760 *  stmmac_ioctl - Entry point for the Ioctl
3761 *  @dev: Device pointer.
3762 *  @rq: An IOCTL-specific structure that can contain a pointer to
3763 *  a proprietary structure used to pass information to the driver.
3764 *  @cmd: IOCTL command
3765 *  Description:
3766 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3767 */
3768static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3769{
3770        int ret = -EOPNOTSUPP;
3771
3772        if (!netif_running(dev))
3773                return -EINVAL;
3774
3775        switch (cmd) {
3776        case SIOCGMIIPHY:
3777        case SIOCGMIIREG:
3778        case SIOCSMIIREG:
3779                if (!dev->phydev)
3780                        return -EINVAL;
3781                ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3782                break;
3783        case SIOCSHWTSTAMP:
3784                ret = stmmac_hwtstamp_ioctl(dev, rq);
3785                break;
3786        default:
3787                break;
3788        }
3789
3790        return ret;
3791}
3792
3793static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3794                                    void *cb_priv)
3795{
3796        struct stmmac_priv *priv = cb_priv;
3797        int ret = -EOPNOTSUPP;
3798
3799        stmmac_disable_all_queues(priv);
3800
3801        switch (type) {
3802        case TC_SETUP_CLSU32:
3803                if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3804                        ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3805                break;
3806        default:
3807                break;
3808        }
3809
3810        stmmac_enable_all_queues(priv);
3811        return ret;
3812}
3813
3814static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3815                                 struct tc_block_offload *f)
3816{
3817        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3818                return -EOPNOTSUPP;
3819
3820        switch (f->command) {
3821        case TC_BLOCK_BIND:
3822                return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
3823                                priv, priv, f->extack);
3824        case TC_BLOCK_UNBIND:
3825                tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3826                return 0;
3827        default:
3828                return -EOPNOTSUPP;
3829        }
3830}
3831
3832static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3833                           void *type_data)
3834{
3835        struct stmmac_priv *priv = netdev_priv(ndev);
3836
3837        switch (type) {
3838        case TC_SETUP_BLOCK:
3839                return stmmac_setup_tc_block(priv, type_data);
3840        case TC_SETUP_QDISC_CBS:
3841                return stmmac_tc_setup_cbs(priv, priv, type_data);
3842        default:
3843                return -EOPNOTSUPP;
3844        }
3845}
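
/*
 * Informational note: only two offload types are accepted here: ingress
 * block binds, which route CLSU32 classifiers through
 * stmmac_setup_tc_block_cb() with all queues quiesced while the filter is
 * programmed, and the CBS qdisc; everything else returns -EOPNOTSUPP.
 */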
3846
3847static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3848{
3849        struct stmmac_priv *priv = netdev_priv(ndev);
3850        int ret = 0;
3851
3852        ret = eth_mac_addr(ndev, addr);
3853        if (ret)
3854                return ret;
3855
3856        stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
3857
3858        return ret;
3859}
3860
3861#ifdef CONFIG_DEBUG_FS
3862static struct dentry *stmmac_fs_dir;
3863
3864static void sysfs_display_ring(void *head, int size, int extend_desc,
3865                               struct seq_file *seq)
3866{
3867        int i;
3868        struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3869        struct dma_desc *p = (struct dma_desc *)head;
3870
3871        for (i = 0; i < size; i++) {
3872                if (extend_desc) {
3873                        seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3874                                   i, (unsigned int)virt_to_phys(ep),
3875                                   le32_to_cpu(ep->basic.des0),
3876                                   le32_to_cpu(ep->basic.des1),
3877                                   le32_to_cpu(ep->basic.des2),
3878                                   le32_to_cpu(ep->basic.des3));
3879                        ep++;
3880                } else {
3881                        seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3882                                   i, (unsigned int)virt_to_phys(p),
3883                                   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3884                                   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3885                        p++;
3886                }
3887                seq_printf(seq, "\n");
3888        }
3889}
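
/*
 * Informational note: each ring entry is dumped as
 * "<index> [<physical address>]: des0 des1 des2 des3", one line per
 * descriptor, for both the basic and the extended descriptor layouts.
 */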
3890
3891static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3892{
3893        struct net_device *dev = seq->private;
3894        struct stmmac_priv *priv = netdev_priv(dev);
3895        u32 rx_count = priv->plat->rx_queues_to_use;
3896        u32 tx_count = priv->plat->tx_queues_to_use;
3897        u32 queue;
3898
3899        for (queue = 0; queue < rx_count; queue++) {
3900                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3901
3902                seq_printf(seq, "RX Queue %d:\n", queue);
3903
3904                if (priv->extend_desc) {
3905                        seq_printf(seq, "Extended descriptor ring:\n");
3906                        sysfs_display_ring((void *)rx_q->dma_erx,
3907                                           DMA_RX_SIZE, 1, seq);
3908                } else {
3909                        seq_printf(seq, "Descriptor ring:\n");
3910                        sysfs_display_ring((void *)rx_q->dma_rx,
3911                                           DMA_RX_SIZE, 0, seq);
3912                }
3913        }
3914
3915        for (queue = 0; queue < tx_count; queue++) {
3916                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3917
3918                seq_printf(seq, "TX Queue %d:\n", queue);
3919
3920                if (priv->extend_desc) {
3921                        seq_printf(seq, "Extended descriptor ring:\n");
3922                        sysfs_display_ring((void *)tx_q->dma_etx,
3923                                           DMA_TX_SIZE, 1, seq);
3924                } else {
3925                        seq_printf(seq, "Descriptor ring:\n");
3926                        sysfs_display_ring((void *)tx_q->dma_tx,
3927                                           DMA_TX_SIZE, 0, seq);
3928                }
3929        }
3930
3931        return 0;
3932}
3933
3934static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3935{
3936        return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3937}
3938
3939/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3940
3941static const struct file_operations stmmac_rings_status_fops = {
3942        .owner = THIS_MODULE,
3943        .open = stmmac_sysfs_ring_open,
3944        .read = seq_read,
3945        .llseek = seq_lseek,
3946        .release = single_release,
3947};
3948
3949static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3950{
3951        struct net_device *dev = seq->private;
3952        struct stmmac_priv *priv = netdev_priv(dev);
3953
3954        if (!priv->hw_cap_support) {
3955                seq_printf(seq, "DMA HW features not supported\n");
3956                return 0;
3957        }
3958
3959        seq_printf(seq, "==============================\n");
3960        seq_printf(seq, "\tDMA HW features\n");
3961        seq_printf(seq, "==============================\n");
3962
3963        seq_printf(seq, "\t10/100 Mbps: %s\n",
3964                   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3965        seq_printf(seq, "\t1000 Mbps: %s\n",
3966                   (priv->dma_cap.mbps_1000) ? "Y" : "N");
3967        seq_printf(seq, "\tHalf duplex: %s\n",
3968                   (priv->dma_cap.half_duplex) ? "Y" : "N");
3969        seq_printf(seq, "\tHash Filter: %s\n",
3970                   (priv->dma_cap.hash_filter) ? "Y" : "N");
3971        seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3972                   (priv->dma_cap.multi_addr) ? "Y" : "N");
3973        seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3974                   (priv->dma_cap.pcs) ? "Y" : "N");
3975        seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3976                   (priv->dma_cap.sma_mdio) ? "Y" : "N");
3977        seq_printf(seq, "\tPMT Remote wake up: %s\n",
3978                   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3979        seq_printf(seq, "\tPMT Magic Frame: %s\n",
3980                   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3981        seq_printf(seq, "\tRMON module: %s\n",
3982                   (priv->dma_cap.rmon) ? "Y" : "N");
3983        seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3984                   (priv->dma_cap.time_stamp) ? "Y" : "N");
3985        seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3986                   (priv->dma_cap.atime_stamp) ? "Y" : "N");
3987        seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3988                   (priv->dma_cap.eee) ? "Y" : "N");
3989        seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3990        seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3991                   (priv->dma_cap.tx_coe) ? "Y" : "N");
3992        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3993                seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3994                           (priv->dma_cap.rx_coe) ? "Y" : "N");
3995        } else {
3996                seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3997                           (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3998                seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3999                           (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4000        }
4001        seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4002                   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4003        seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4004                   priv->dma_cap.number_rx_channel);
4005        seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4006                   priv->dma_cap.number_tx_channel);
4007        seq_printf(seq, "\tEnhanced descriptors: %s\n",
4008                   (priv->dma_cap.enh_desc) ? "Y" : "N");
4009
4010        return 0;
4011}
4012
4013static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
4014{
4015        return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
4016}
4017
4018static const struct file_operations stmmac_dma_cap_fops = {
4019        .owner = THIS_MODULE,
4020        .open = stmmac_sysfs_dma_cap_open,
4021        .read = seq_read,
4022        .llseek = seq_lseek,
4023        .release = single_release,
4024};
4025
4026static int stmmac_init_fs(struct net_device *dev)
4027{
4028        struct stmmac_priv *priv = netdev_priv(dev);
4029
4030        /* Create per netdev entries */
4031        priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
4032
4033        if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
4034                netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
4035
4036                return -ENOMEM;
4037        }
4038
4039        /* Entry to report DMA RX/TX rings */
4040        priv->dbgfs_rings_status =
4041                debugfs_create_file("descriptors_status", 0444,
4042                                    priv->dbgfs_dir, dev,
4043                                    &stmmac_rings_status_fops);
4044
4045        if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
4046                netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
4047                debugfs_remove_recursive(priv->dbgfs_dir);
4048
4049                return -ENOMEM;
4050        }
4051
4052        /* Entry to report the DMA HW features */
4053        priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4054                                                  priv->dbgfs_dir,
4055                                                  dev, &stmmac_dma_cap_fops);
4056
4057        if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
4058                netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
4059                debugfs_remove_recursive(priv->dbgfs_dir);
4060
4061                return -ENOMEM;
4062        }
4063
4064        return 0;
4065}
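
/*
 * Informational note: with the per-netdev directory created above, the
 * debugfs layout is expected to be:
 *
 *	/sys/kernel/debug/stmmaceth/<ifname>/descriptors_status
 *	/sys/kernel/debug/stmmaceth/<ifname>/dma_cap
 *
 * where the "stmmaceth" parent directory (stmmac_fs_dir) is created
 * elsewhere in the driver.
 */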
4066
4067static void stmmac_exit_fs(struct net_device *dev)
4068{
4069        struct stmmac_priv *priv = netdev_priv(dev);
4070
4071        debugfs_remove_recursive(priv->dbgfs_dir);
4072}
4073#endif /* CONFIG_DEBUG_FS */
4074
4075static const struct net_device_ops stmmac_netdev_ops = {
4076        .ndo_open = stmmac_open,
4077        .ndo_start_xmit = stmmac_xmit,
4078        .ndo_stop = stmmac_release,
4079        .ndo_change_mtu = stmmac_change_mtu,
4080        .ndo_fix_features = stmmac_fix_features,
4081        .ndo_set_features = stmmac_set_features,
4082        .ndo_set_rx_mode = stmmac_set_rx_mode,
4083        .ndo_tx_timeout = stmmac_tx_timeout,
4084        .ndo_do_ioctl = stmmac_ioctl,
4085        .ndo_setup_tc = stmmac_setup_tc,
4086#ifdef CONFIG_NET_POLL_CONTROLLER
4087        .ndo_poll_controller = stmmac_poll_controller,
4088#endif
4089        .ndo_set_mac_address = stmmac_set_mac_address,
4090};
4091
4092static void stmmac_reset_subtask(struct stmmac_priv *priv)
4093{
4094        if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4095                return;
4096        if (test_bit(STMMAC_DOWN, &priv->state))
4097                return;
4098
4099        netdev_err(priv->dev, "Reset adapter.\n");
4100
4101        rtnl_lock();
4102        netif_trans_update(priv->dev);
4103        while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4104                usleep_range(1000, 2000);
4105
4106        set_bit(STMMAC_DOWN, &priv->state);
4107        dev_close(priv->dev);
4108        dev_open(priv->dev);
4109        clear_bit(STMMAC_DOWN, &priv->state);
4110        clear_bit(STMMAC_RESETING, &priv->state);
4111        rtnl_unlock();
4112}
4113
4114static void stmmac_service_task(struct work_struct *work)
4115{
4116        struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4117                        service_task);
4118
4119        stmmac_reset_subtask(priv);
4120        clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4121}
4122
4123/**
4124 *  stmmac_hw_init - Init the MAC device
4125 *  @priv: driver private structure
4126 *  Description: this function is to configure the MAC device according to
4127 *  some platform parameters or the HW capability register. It prepares the
4128 *  driver to use either ring or chain modes and to setup either enhanced or
4129 *  normal descriptors.
4130 */
4131static int stmmac_hw_init(struct stmmac_priv *priv)
4132{
4133        int ret;
4134
4135        /* dwmac-sun8i only works in chain mode */
4136        if (priv->plat->has_sun8i)
4137                chain_mode = 1;
4138        priv->chain_mode = chain_mode;
4139
4140        /* Initialize HW Interface */
4141        ret = stmmac_hwif_init(priv);
4142        if (ret)
4143                return ret;
4144
4145        /* Get the HW capability (available on GMAC cores newer than 3.50a) */
4146        priv->hw_cap_support = stmmac_get_hw_features(priv);
4147        if (priv->hw_cap_support) {
4148                dev_info(priv->device, "DMA HW capability register supported\n");
4149
4150                /* We can override some gmac/dma configuration fields (e.g.
4151                 * enh_desc, tx_coe) that are passed through the
4152                 * platform with the values from the HW capability
4153                 * register (if supported).
4154                 */
4155                priv->plat->enh_desc = priv->dma_cap.enh_desc;
4156                priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4157                priv->hw->pmt = priv->plat->pmt;
4158
4159                /* TXCOE doesn't work in thresh DMA mode */
4160                if (priv->plat->force_thresh_dma_mode)
4161                        priv->plat->tx_coe = 0;
4162                else
4163                        priv->plat->tx_coe = priv->dma_cap.tx_coe;
4164
4165                /* In case of GMAC4 rx_coe is from HW cap register. */
4166                priv->plat->rx_coe = priv->dma_cap.rx_coe;
4167
4168                if (priv->dma_cap.rx_coe_type2)
4169                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4170                else if (priv->dma_cap.rx_coe_type1)
4171                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4172
4173        } else {
4174                dev_info(priv->device, "No HW DMA feature register supported\n");
4175        }
4176
4177        if (priv->plat->rx_coe) {
4178                priv->hw->rx_csum = priv->plat->rx_coe;
4179                dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4180                if (priv->synopsys_id < DWMAC_CORE_4_00)
4181                        dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4182        }
4183        if (priv->plat->tx_coe)
4184                dev_info(priv->device, "TX Checksum insertion supported\n");
4185
4186        if (priv->plat->pmt) {
4187                dev_info(priv->device, "Wake-Up On Lan supported\n");
4188                device_set_wakeup_capable(priv->device, 1);
4189        }
4190
4191        if (priv->dma_cap.tsoen)
4192                dev_info(priv->device, "TSO supported\n");
4193
4194        /* Run HW quirks, if any */
4195        if (priv->hwif_quirks) {
4196                ret = priv->hwif_quirks(priv);
4197                if (ret)
4198                        return ret;
4199        }
4200
4201        return 0;
4202}
4203
4204/**
4205 * stmmac_dvr_probe
4206 * @device: device pointer
4207 * @plat_dat: platform data pointer
4208 * @res: stmmac resource pointer
4209 * Description: this is the main probe function used to
4210 * call the alloc_etherdev, allocate the priv structure.
4211 * Return:
4212 * returns 0 on success, otherwise errno.
4213 */
int stmmac_dvr_probe(struct device *device,
                     struct plat_stmmacenet_data *plat_dat,
                     struct stmmac_resources *res)
{
        struct net_device *ndev = NULL;
        struct stmmac_priv *priv;
        u32 queue, maxq;
        int ret = 0;

        ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
                                  MTL_MAX_TX_QUEUES,
                                  MTL_MAX_RX_QUEUES);
        if (!ndev)
                return -ENOMEM;

        SET_NETDEV_DEV(ndev, device);

        priv = netdev_priv(ndev);
        priv->device = device;
        priv->dev = ndev;

        stmmac_set_ethtool_ops(ndev);
        priv->pause = pause;
        priv->plat = plat_dat;
        priv->ioaddr = res->addr;
        priv->dev->base_addr = (unsigned long)res->addr;

        priv->dev->irq = res->irq;
        priv->wol_irq = res->wol_irq;
        priv->lpi_irq = res->lpi_irq;

        if (res->mac)
                memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

        dev_set_drvdata(device, priv->dev);

        /* Verify driver arguments */
        stmmac_verify_args();

        /* Allocate workqueue */
        priv->wq = create_singlethread_workqueue("stmmac_wq");
        if (!priv->wq) {
                dev_err(priv->device, "failed to create workqueue\n");
                ret = -ENOMEM;
                goto error_wq;
        }

        INIT_WORK(&priv->service_task, stmmac_service_task);

        /* Override with kernel parameters if supplied XXX CRS XXX
         * this needs to have multiple instances
         */
        if ((phyaddr >= 0) && (phyaddr <= 31))
                priv->plat->phy_addr = phyaddr;
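        /* e.g. loading the module with "phyaddr=1" (or booting a built-in
         * driver with "stmmac.phyaddr=1") applies this override; note that
         * it affects every stmmac instance, hence the XXX above.
         */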

        if (priv->plat->stmmac_rst) {
                ret = reset_control_assert(priv->plat->stmmac_rst);
                reset_control_deassert(priv->plat->stmmac_rst);
                /* Some reset controllers provide only a reset callback
                 * instead of an assert + deassert callback pair.
                 */
                if (ret == -ENOTSUPP)
                        reset_control_reset(priv->plat->stmmac_rst);
        }

        /* Init MAC and get the capabilities */
        ret = stmmac_hw_init(priv);
        if (ret)
                goto error_hw_init;

        /* Configure real RX and TX queues */
        netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
        netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);

        ndev->netdev_ops = &stmmac_netdev_ops;

        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                            NETIF_F_RXCSUM;

        ret = stmmac_tc_init(priv, priv);
        if (!ret) {
                ndev->hw_features |= NETIF_F_HW_TC;
        }
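        /* With NETIF_F_HW_TC advertised, the tc offloads set up by
         * stmmac_tc_init() become reachable from userspace, e.g. offloading
         * a CBS qdisc via "tc qdisc replace ... cbs ... offload 1"
         * (illustrative; actual support depends on the MAC/MTL features).
         */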

        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
                ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
                priv->tso = true;
                dev_info(priv->device, "TSO feature enabled\n");
        }
        ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
        ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
        /* Both mac100 and gmac support receive VLAN tag detection */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
#endif
        priv->msg_enable = netif_msg_init(debug, default_msg_level);

        /* MTU range: 46 - hw-specific max */
        ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
        if (priv->plat->has_xgmac)
                ndev->max_mtu = XGMAC_JUMBO_LEN;
        else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
                ndev->max_mtu = JUMBO_LEN;
        else
                ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
        /* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than
         * ndev->max_mtu, or if plat->maxmtu is below ndev->min_mtu, which
         * is an invalid range.
         */
        if ((priv->plat->maxmtu < ndev->max_mtu) &&
            (priv->plat->maxmtu >= ndev->min_mtu))
                ndev->max_mtu = priv->plat->maxmtu;
        else if (priv->plat->maxmtu < ndev->min_mtu)
                dev_warn(priv->device,
                         "%s: warning: maxmtu has an invalid value (%d)\n",
                         __func__, priv->plat->maxmtu);
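        /* plat->maxmtu typically comes from the bus glue, e.g. the
         * "max-frame-size" DT property parsed by stmmac_platform, and is
         * only used here to further restrict the MTU range advertised above.
         */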

        if (flow_ctrl)
                priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */

        /* Rx Watchdog is available in the COREs newer than the 3.40.
         * In some cases, for example on buggy HW, this feature has to be
         * disabled; this can be done by setting the riwt_off field from
         * the platform.
         */
        if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
            (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
                priv->use_riwt = 1;
                dev_info(priv->device,
                         "Enable RX Mitigation via HW Watchdog Timer\n");
        }

        /* Setup channels NAPI */
        maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

        for (queue = 0; queue < maxq; queue++) {
                struct stmmac_channel *ch = &priv->channel[queue];

                ch->priv_data = priv;
                ch->index = queue;

                if (queue < priv->plat->rx_queues_to_use)
                        ch->has_rx = true;
                if (queue < priv->plat->tx_queues_to_use)
                        ch->has_tx = true;

                netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
                               NAPI_POLL_WEIGHT);
        }
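        /* Each channel owns a single NAPI context that services the RX and/or
         * TX queue with the same index, so one poll loop covers both
         * directions of a DMA channel.
         */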

        mutex_init(&priv->lock);

        /* If a specific clk_csr value is passed from the platform
         * this means that the CSR Clock Range selection cannot be
         * changed at run-time and it is fixed. Otherwise, the driver
         * will try to set the MDC clock dynamically according to the
         * actual csr clock input.
         */
        if (!priv->plat->clk_csr)
                stmmac_clk_csr_set(priv);
        else
                priv->clk_csr = priv->plat->clk_csr;
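        /* For example, with a 50 MHz csr clock, stmmac_clk_csr_set() would
         * select the STMMAC_CSR_35_60M range (illustrative only; the chosen
         * constant depends on the measured clock rate).
         */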

        stmmac_check_pcs_mode(priv);

        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
            priv->hw->pcs != STMMAC_PCS_TBI &&
            priv->hw->pcs != STMMAC_PCS_RTBI) {
                /* MDIO bus Registration */
                ret = stmmac_mdio_register(ndev);
                if (ret < 0) {
                        dev_err(priv->device,
                                "%s: MDIO bus (id: %d) registration failed\n",
                                __func__, priv->plat->bus_id);
                        goto error_mdio_register;
                }
        }

        ret = register_netdev(ndev);
        if (ret) {
                dev_err(priv->device, "%s: ERROR %i registering the device\n",
                        __func__, ret);
                goto error_netdev_register;
        }

        return ret;

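        /* Error unwind: release everything acquired above in reverse order. */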
error_netdev_register:
        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
            priv->hw->pcs != STMMAC_PCS_TBI &&
            priv->hw->pcs != STMMAC_PCS_RTBI)
                stmmac_mdio_unregister(ndev);
error_mdio_register:
        for (queue = 0; queue < maxq; queue++) {
                struct stmmac_channel *ch = &priv->channel[queue];

                netif_napi_del(&ch->napi);
        }
error_hw_init:
        destroy_workqueue(priv->wq);
error_wq:
        free_netdev(ndev);

        return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
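
/* Usage sketch (not code from this file): bus glue drivers fill in a
 * struct stmmac_resources plus platform data and then call the probe
 * above; stmmac_platform, for instance, does roughly:
 *
 *      ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 *
 * and calls stmmac_dvr_remove() from its own .remove callback.
 */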

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);

        netdev_info(priv->dev, "%s: removing driver\n", __func__);

        stmmac_stop_all_dma(priv);

        stmmac_mac_set(priv, priv->ioaddr, false);
        netif_carrier_off(ndev);
        unregister_netdev(ndev);
        if (priv->plat->stmmac_rst)
                reset_control_assert(priv->plat->stmmac_rst);
        clk_disable_unprepare(priv->plat->pclk);
        clk_disable_unprepare(priv->plat->stmmac_clk);
        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
            priv->hw->pcs != STMMAC_PCS_TBI &&
            priv->hw->pcs != STMMAC_PCS_RTBI)
                stmmac_mdio_unregister(ndev);
        destroy_workqueue(priv->wq);
        mutex_destroy(&priv->lock);
        free_netdev(ndev);

        return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: suspends the device. It is called by the platform driver to
 * stop the network queues, release the resources, and program the PMT
 * register (for WoL).
 */
int stmmac_suspend(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);

        if (!ndev || !netif_running(ndev))
                return 0;

        if (ndev->phydev)
                phy_stop(ndev->phydev);

        mutex_lock(&priv->lock);

        netif_device_detach(ndev);
        stmmac_stop_all_queues(priv);

        stmmac_disable_all_queues(priv);

        /* Stop TX/RX DMA */
        stmmac_stop_all_dma(priv);

        /* Enable Power down mode by programming the PMT regs */
        if (device_may_wakeup(priv->device)) {
                stmmac_pmt(priv, priv->hw, priv->wolopts);
                priv->irq_wake = 1;
        } else {
                stmmac_mac_set(priv, priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable the clocks, since PM wake-up is not used */
                clk_disable(priv->plat->pclk);
                clk_disable(priv->plat->stmmac_clk);
        }
        mutex_unlock(&priv->lock);

        priv->oldlink = false;
        priv->speed = SPEED_UNKNOWN;
        priv->oldduplex = DUPLEX_UNKNOWN;
        return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
        u32 rx_cnt = priv->plat->rx_queues_to_use;
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        u32 queue;

        for (queue = 0; queue < rx_cnt; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

                rx_q->cur_rx = 0;
                rx_q->dirty_rx = 0;
        }

        for (queue = 0; queue < tx_cnt; queue++) {
                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

                tx_q->cur_tx = 0;
                tx_q->dirty_tx = 0;
                tx_q->mss = 0;
        }
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resuming, this function is invoked to set up the DMA
 * and CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
        struct net_device *ndev = dev_get_drvdata(dev);
        struct stmmac_priv *priv = netdev_priv(ndev);

        if (!netif_running(ndev))
                return 0;

        /* The Power Down bit in the PMT register is cleared automatically
         * as soon as a magic packet or a Wake-up frame is received.
         * Anyway, it's better to manually clear this bit because it can
         * generate problems while resuming from another device
         * (e.g. serial console).
         */
        if (device_may_wakeup(priv->device)) {
                mutex_lock(&priv->lock);
                stmmac_pmt(priv, priv->hw, 0);
                mutex_unlock(&priv->lock);
                priv->irq_wake = 0;
        } else {
                pinctrl_pm_select_default_state(priv->device);
                /* enable the clocks previously disabled */
                clk_enable(priv->plat->stmmac_clk);
                clk_enable(priv->plat->pclk);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
        }

        netif_device_attach(ndev);

        mutex_lock(&priv->lock);

        stmmac_reset_queues_param(priv);

        stmmac_clear_descriptors(priv);

        stmmac_hw_setup(ndev, false);
        stmmac_init_tx_coalesce(priv);
        stmmac_set_rx_mode(ndev);

        stmmac_enable_all_queues(priv);

        stmmac_start_all_queues(priv);

        mutex_unlock(&priv->lock);

        if (ndev->phydev)
                phy_start(ndev->phydev);

        return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
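
/* Sketch (defined in the bus glue, not in this file): the suspend/resume
 * callbacks above are typically wired into a dev_pm_ops, e.g. the PCI glue
 * does something like:
 *
 *      static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
 *
 * while the platform glue wraps them with its platform-specific
 * init/exit hooks.
 */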

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
        char *opt;

        if (!str || !*str)
                return -EINVAL;
        while ((opt = strsep(&str, ",")) != NULL) {
                if (!strncmp(opt, "debug:", 6)) {
                        if (kstrtoint(opt + 6, 0, &debug))
                                goto err;
                } else if (!strncmp(opt, "phyaddr:", 8)) {
                        if (kstrtoint(opt + 8, 0, &phyaddr))
                                goto err;
                } else if (!strncmp(opt, "buf_sz:", 7)) {
                        if (kstrtoint(opt + 7, 0, &buf_sz))
                                goto err;
                } else if (!strncmp(opt, "tc:", 3)) {
                        if (kstrtoint(opt + 3, 0, &tc))
                                goto err;
                } else if (!strncmp(opt, "watchdog:", 9)) {
                        if (kstrtoint(opt + 9, 0, &watchdog))
                                goto err;
                } else if (!strncmp(opt, "flow_ctrl:", 10)) {
                        if (kstrtoint(opt + 10, 0, &flow_ctrl))
                                goto err;
                } else if (!strncmp(opt, "pause:", 6)) {
                        if (kstrtoint(opt + 6, 0, &pause))
                                goto err;
                } else if (!strncmp(opt, "eee_timer:", 10)) {
                        if (kstrtoint(opt + 10, 0, &eee_timer))
                                goto err;
                } else if (!strncmp(opt, "chain_mode:", 11)) {
                        if (kstrtoint(opt + 11, 0, &chain_mode))
                                goto err;
                }
        }
        return 0;

err:
        pr_err("%s: ERROR broken module parameter conversion\n", __func__);
        return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
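
/* For example, booting a kernel with the driver built in and
 *
 *      stmmaceth=debug:16,buf_sz:4096,chain_mode:1
 *
 * on the command line sets the message level, DMA buffer size and chain
 * mode at boot time (illustrative values only).
 */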
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
        /* Create debugfs main directory if it doesn't exist yet */
        if (!stmmac_fs_dir) {
                stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

                if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
                        pr_err("ERROR %s, debugfs create directory failed\n",
                               STMMAC_RESOURCE_NAME);

                        return -ENOMEM;
                }
        }
#endif

        return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");