linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/
  26
  27#include <linux/clk.h>
  28#include <linux/kernel.h>
  29#include <linux/interrupt.h>
  30#include <linux/ip.h>
  31#include <linux/tcp.h>
  32#include <linux/skbuff.h>
  33#include <linux/ethtool.h>
  34#include <linux/if_ether.h>
  35#include <linux/crc32.h>
  36#include <linux/mii.h>
  37#include <linux/if.h>
  38#include <linux/if_vlan.h>
  39#include <linux/dma-mapping.h>
  40#include <linux/slab.h>
  41#include <linux/pm_runtime.h>
  42#include <linux/prefetch.h>
  43#include <linux/pinctrl/consumer.h>
  44#ifdef CONFIG_DEBUG_FS
  45#include <linux/debugfs.h>
  46#include <linux/seq_file.h>
  47#endif /* CONFIG_DEBUG_FS */
  48#include <linux/net_tstamp.h>
  49#include <linux/phylink.h>
  50#include <linux/udp.h>
  51#include <net/pkt_cls.h>
  52#include "stmmac_ptp.h"
  53#include "stmmac.h"
  54#include <linux/reset.h>
  55#include <linux/of_mdio.h>
  56#include "dwmac1000.h"
  57#include "dwxgmac2.h"
  58#include "hwif.h"
  59
#define STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
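
/*
 * Worked example (illustrative): with HZ = 1000, a tx_lpi_timer value of
 * 1000000 us gives STMMAC_LPI_T(1000000) == jiffies + 1000, i.e. the EEE
 * software timer fires roughly one second later.  Note that the eee_timer
 * module parameter above is expressed in msec; the driver is assumed to
 * convert it to usec before the value reaches this macro.
 */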

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow the user to force the use of chain mode instead of ring mode.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
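
/*
 * Illustrative sketch (not part of the driver): a typical caller, such as
 * a hypothetical resume path, would enable the bus clocks and bail out on
 * failure before touching any register:
 *
 *	int err = stmmac_bus_clks_config(priv, true);
 *	if (err)
 *		return err;
 *	// ...MMIO accesses are safe from here on...
 *	stmmac_bus_clks_config(priv, false);
 *
 * The function unwinds partially-enabled clocks itself, so callers only
 * need to check the return value.
 */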

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
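
/*
 * Worked example (illustrative): on a core with a 125 MHz CSR clock, the
 * range checks above select STMMAC_CSR_100_150M.  The resulting MDC rate
 * depends on the divider the hardware associates with that range; assuming
 * the usual divider of 62 for this range, MDC would run at about 2 MHz,
 * safely below the 2.5 MHz ceiling of IEEE 802.3 Clause 22 MDIO.
 */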

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}
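
/*
 * Worked example (illustrative): with dma_tx_size = 512, cur_tx = 10 and
 * dirty_tx = 5, the producer has wrapped past the consumer, so
 * avail = 512 - 10 + 5 - 1 = 506 free descriptors.  The "- 1" keeps one
 * slot permanently unused so that cur_tx == dirty_tx unambiguously means
 * "ring empty" rather than "ring full".
 */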

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}
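
/*
 * Worked example (illustrative): with dma_rx_size = 512, cur_rx = 3 and
 * dirty_rx = 500, the index has wrapped, so dirty = 512 - 500 + 3 = 15
 * descriptors are waiting to be refilled.
 */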

static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function checks that all TX queues are idle and, if
 * so, enters LPI mode for EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits LPI mode if the LPI
 * state is true. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}
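
/*
 * Illustrative note: on GMAC4 cores, if tx_lpi_timer fits within the
 * hardware LPI entry timer budget (<= STMMAC_ET_MAX) the driver lets the
 * MAC enter LPI autonomously; otherwise it falls back to the software
 * eee_ctrl_timer above.  For example, assuming STMMAC_ET_MAX covers the
 * default 1000000 us value, the default setup would use the hardware
 * path, while a larger user-configured timeout would use the kernel
 * timer instead.
 */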

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	s64 adjust = 0;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		/* Correct the clk domain crossing(CDC) error */
		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
			adjust += -(2 * (NSEC_PER_SEC /
					 priv->plat->clk_ptp_rate));
			ns += adjust;
		}

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}
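
/*
 * Worked example (illustrative): with clk_ptp_rate = 50 MHz, one PTP
 * clock period is NSEC_PER_SEC / 50000000 = 20 ns, so the clock domain
 * crossing correction above subtracts 2 * 20 = 40 ns from the raw
 * hardware timestamp.
 */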

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 adjust = 0;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		/* Correct the clk domain crossing(CDC) error */
		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
			adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
			ns -= adjust;
		}

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id != DWMAC_CORE_5_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en) {
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	} else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
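
/*
 * Worked example (illustrative): assume clk_ptp_rate = 50 MHz and that
 * the sub-second increment register was programmed to sec_inc = 40 ns,
 * i.e. half the nominal resolution, as is common for the fine update
 * method.  Then temp = 1e9 / 40 = 25000000 and default_addend =
 * (25000000 << 32) / 50000000 = 0x80000000: the 32-bit accumulator
 * overflows every second PTP clock cycle, and the PTP frequency
 * adjustment callback can later scale the addend to trim the clock.
 */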

/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}
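
/*
 * Illustrative user-space sketch (not part of the driver): the two
 * handlers above back the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls,
 * so an application would drive them roughly like this (error handling
 * elided, "eth0" is an assumed interface name):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);	// set; may adjust cfg
 *	ioctl(sock_fd, SIOCGHWTSTAMP, &ifr);	// read back current config
 */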

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  @duplex: duplex passed to the next function
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}

static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
}
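
/*
 * Illustrative note: "mask" collects link modes to strip.  For example,
 * with max_speed = 100 the 1000baseT/X full-duplex bits are set in mask,
 * and the linkmode_andnot() calls above remove them from both the
 * supported and advertising sets, so phylink will never negotiate
 * gigabit on such a platform.
 */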

static void stmmac_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	state->link = 0;
	stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
	} else {
		*lo_state = FPE_EVENT_UNKNOWN;
		*lp_state = FPE_EVENT_UNKNOWN;
	}
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_pcs_get_state = stmmac_mac_pcs_get_state,
	.mac_config = stmmac_mac_config,
	.mac_an_restart = stmmac_mac_an_restart,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the Physical Coding
 * Sublayer (PCS), which can be used when the MAC is configured for the
 * TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	phylink_ethtool_get_wol(priv->phylink, &wol);
	device_set_wakeup_capable(priv->device, !!wol.supported);

	return ret;
}

static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			priv->plat->mdio_bus_data->xpcs_an_inband;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}
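
/*
 * Worked example (illustrative): an MTU of 3000 bytes is >= BUF_SIZE_2KiB
 * but < BUF_SIZE_4KiB, so the function returns BUF_SIZE_4KiB: the DMA
 * buffer is rounded up to the next supported size so that a full frame
 * always fits in a single buffer.
 */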

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < priv->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					priv->use_riwt, priv->mode,
					(i == priv->dma_rx_size - 1),
					priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					priv->use_riwt, priv->mode,
					(i == priv->dma_rx_size - 1),
					priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < priv->dma_tx_size; i++) {
		int last = (i == (priv->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}
1377
1378/**
1379 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1380 * @priv: driver private structure
1381 * @p: descriptor pointer
1382 * @i: descriptor index
1383 * @flags: gfp flag
1384 * @queue: RX queue index
1385 * Description: this function is called to allocate a receive buffer, perform
1386 * the DMA mapping and init the descriptor.
1387 */
1388static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1389                                  int i, gfp_t flags, u32 queue)
1390{
1391        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1392        struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1393        gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1394
1395        if (priv->dma_cap.addr64 <= 32)
1396                gfp |= GFP_DMA32;
1397
1398        buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1399        if (!buf->page)
1400                return -ENOMEM;
1401
1402        if (priv->sph) {
1403                buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1404                if (!buf->sec_page)
1405                        return -ENOMEM;
1406
1407                buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1408                stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1409        } else {
1410                buf->sec_page = NULL;
1411                stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1412        }
1413
1414        buf->addr = page_pool_get_dma_addr(buf->page);
1415        stmmac_set_desc_addr(priv, p, buf->addr);
1416        if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1417                stmmac_init_desc3(priv, p);
1418
1419        return 0;
1420}
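
/* Note: the RX page pool is created with PP_FLAG_DMA_MAP (see
 * alloc_dma_rx_desc_resources()), so pages returned by
 * page_pool_alloc_pages() come back already DMA-mapped; the code above
 * only queries that mapping with page_pool_get_dma_addr() and writes it
 * into the descriptor.
 */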
1421
1422/**
1423 * stmmac_free_rx_buffer - free RX dma buffers
1424 * @priv: private structure
1425 * @queue: RX queue index
1426 * @i: buffer index.
1427 */
1428static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1429{
1430        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1431        struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1432
1433        if (buf->page)
1434                page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1435        buf->page = NULL;
1436
1437        if (buf->sec_page)
1438                page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1439        buf->sec_page = NULL;
1440}
1441
1442/**
1443 * stmmac_free_tx_buffer - free TX dma buffers
1444 * @priv: private structure
1445 * @queue: TX queue index.
1446 * @i: buffer index.
1447 */
1448static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1449{
1450        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1451
1452        if (tx_q->tx_skbuff_dma[i].buf) {
1453                if (tx_q->tx_skbuff_dma[i].map_as_page)
1454                        dma_unmap_page(priv->device,
1455                                       tx_q->tx_skbuff_dma[i].buf,
1456                                       tx_q->tx_skbuff_dma[i].len,
1457                                       DMA_TO_DEVICE);
1458                else
1459                        dma_unmap_single(priv->device,
1460                                         tx_q->tx_skbuff_dma[i].buf,
1461                                         tx_q->tx_skbuff_dma[i].len,
1462                                         DMA_TO_DEVICE);
1463        }
1464
1465        if (tx_q->tx_skbuff[i]) {
1466                dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1467                tx_q->tx_skbuff[i] = NULL;
1468                tx_q->tx_skbuff_dma[i].buf = 0;
1469                tx_q->tx_skbuff_dma[i].map_as_page = false;
1470        }
1471}
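
/* Note: map_as_page mirrors how the buffer was mapped on the transmit
 * path (skb fragments via dma_map_page(), the linear part via
 * dma_map_single()), so the matching unmap routine must be used here.
 */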
1472
1473/**
1474 * init_dma_rx_desc_rings - init the RX descriptor rings
1475 * @dev: net device structure
1476 * @flags: gfp flag.
1477 * Description: this function initializes the DMA RX descriptors
1478 * and allocates the receive buffers. It supports the chained and ring
1479 * modes.
1480 */
1481static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1482{
1483        struct stmmac_priv *priv = netdev_priv(dev);
1484        u32 rx_count = priv->plat->rx_queues_to_use;
1485        int ret = -ENOMEM;
1486        int queue;
1487        int i;
1488
1489        /* RX INITIALIZATION */
1490        netif_dbg(priv, probe, priv->dev,
1491                  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1492
1493        for (queue = 0; queue < rx_count; queue++) {
1494                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1495
1496                netif_dbg(priv, probe, priv->dev,
1497                          "(%s) dma_rx_phy=0x%08x\n", __func__,
1498                          (u32)rx_q->dma_rx_phy);
1499
1500                stmmac_clear_rx_descriptors(priv, queue);
1501
1502                for (i = 0; i < priv->dma_rx_size; i++) {
1503                        struct dma_desc *p;
1504
1505                        if (priv->extend_desc)
1506                                p = &((rx_q->dma_erx + i)->basic);
1507                        else
1508                                p = rx_q->dma_rx + i;
1509
1510                        ret = stmmac_init_rx_buffers(priv, p, i, flags,
1511                                                     queue);
1512                        if (ret)
1513                                goto err_init_rx_buffers;
1514                }
1515
1516                rx_q->cur_rx = 0;
1517                rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
1518
1519                /* Setup the chained descriptor addresses */
1520                if (priv->mode == STMMAC_CHAIN_MODE) {
1521                        if (priv->extend_desc)
1522                                stmmac_mode_init(priv, rx_q->dma_erx,
1523                                                 rx_q->dma_rx_phy,
1524                                                 priv->dma_rx_size, 1);
1525                        else
1526                                stmmac_mode_init(priv, rx_q->dma_rx,
1527                                                 rx_q->dma_rx_phy,
1528                                                 priv->dma_rx_size, 0);
1529                }
1530        }
1531
1532        return 0;
1533
1534err_init_rx_buffers:
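        /* Unwind in reverse: free the buffers allocated so far in the
         * failing queue (i is the failing index), then every buffer of
         * the queues that were fully initialized before it.
         */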
1535        while (queue >= 0) {
1536                while (--i >= 0)
1537                        stmmac_free_rx_buffer(priv, queue, i);
1538
1539                if (queue == 0)
1540                        break;
1541
1542                i = priv->dma_rx_size;
1543                queue--;
1544        }
1545
1546        return ret;
1547}
1548
1549/**
1550 * init_dma_tx_desc_rings - init the TX descriptor rings
1551 * @dev: net device structure.
1552 * Description: this function initializes the DMA TX descriptors
1553 * and the per-entry bookkeeping state. It supports the chained and
1554 * ring modes.
1555 */
1556static int init_dma_tx_desc_rings(struct net_device *dev)
1557{
1558        struct stmmac_priv *priv = netdev_priv(dev);
1559        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1560        u32 queue;
1561        int i;
1562
1563        for (queue = 0; queue < tx_queue_cnt; queue++) {
1564                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1565
1566                netif_dbg(priv, probe, priv->dev,
1567                          "(%s) dma_tx_phy=0x%08x\n", __func__,
1568                          (u32)tx_q->dma_tx_phy);
1569
1570                /* Setup the chained descriptor addresses */
1571                if (priv->mode == STMMAC_CHAIN_MODE) {
1572                        if (priv->extend_desc)
1573                                stmmac_mode_init(priv, tx_q->dma_etx,
1574                                                 tx_q->dma_tx_phy,
1575                                                 priv->dma_tx_size, 1);
1576                        else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1577                                stmmac_mode_init(priv, tx_q->dma_tx,
1578                                                 tx_q->dma_tx_phy,
1579                                                 priv->dma_tx_size, 0);
1580                }
1581
1582                for (i = 0; i < priv->dma_tx_size; i++) {
1583                        struct dma_desc *p;
1584                        if (priv->extend_desc)
1585                                p = &((tx_q->dma_etx + i)->basic);
1586                        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1587                                p = &((tx_q->dma_entx + i)->basic);
1588                        else
1589                                p = tx_q->dma_tx + i;
1590
1591                        stmmac_clear_desc(priv, p);
1592
1593                        tx_q->tx_skbuff_dma[i].buf = 0;
1594                        tx_q->tx_skbuff_dma[i].map_as_page = false;
1595                        tx_q->tx_skbuff_dma[i].len = 0;
1596                        tx_q->tx_skbuff_dma[i].last_segment = false;
1597                        tx_q->tx_skbuff[i] = NULL;
1598                }
1599
1600                tx_q->dirty_tx = 0;
1601                tx_q->cur_tx = 0;
1602                tx_q->mss = 0;
1603
1604                netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1605        }
1606
1607        return 0;
1608}
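
/* Note: netdev_tx_reset_queue() clears the byte-queue-limits (BQL) state
 * of the queue; it pairs with the netdev_tx_completed_queue() accounting
 * performed in stmmac_tx_clean().
 */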
1609
1610/**
1611 * init_dma_desc_rings - init the RX/TX descriptor rings
1612 * @dev: net device structure
1613 * @flags: gfp flag.
1614 * Description: this function initializes the DMA RX/TX descriptors
1615 * and allocates the receive buffers. It supports the chained and ring
1616 * modes.
1617 */
1618static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1619{
1620        struct stmmac_priv *priv = netdev_priv(dev);
1621        int ret;
1622
1623        ret = init_dma_rx_desc_rings(dev, flags);
1624        if (ret)
1625                return ret;
1626
1627        ret = init_dma_tx_desc_rings(dev);
1628
1629        stmmac_clear_descriptors(priv);
1630
1631        if (netif_msg_hw(priv))
1632                stmmac_display_rings(priv);
1633
1634        return ret;
1635}
1636
1637/**
1638 * dma_free_rx_skbufs - free RX dma buffers
1639 * @priv: private structure
1640 * @queue: RX queue index
1641 */
1642static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1643{
1644        int i;
1645
1646        for (i = 0; i < priv->dma_rx_size; i++)
1647                stmmac_free_rx_buffer(priv, queue, i);
1648}
1649
1650/**
1651 * dma_free_tx_skbufs - free TX dma buffers
1652 * @priv: private structure
1653 * @queue: TX queue index
1654 */
1655static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1656{
1657        int i;
1658
1659        for (i = 0; i < priv->dma_tx_size; i++)
1660                stmmac_free_tx_buffer(priv, queue, i);
1661}
1662
1663/**
1664 * stmmac_free_tx_skbufs - free TX skb buffers
1665 * @priv: private structure
1666 */
1667static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1668{
1669        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1670        u32 queue;
1671
1672        for (queue = 0; queue < tx_queue_cnt; queue++)
1673                dma_free_tx_skbufs(priv, queue);
1674}
1675
1676/**
1677 * free_dma_rx_desc_resources - free RX dma desc resources
1678 * @priv: private structure
1679 */
1680static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1681{
1682        u32 rx_count = priv->plat->rx_queues_to_use;
1683        u32 queue;
1684
1685        /* Free RX queue resources */
1686        for (queue = 0; queue < rx_count; queue++) {
1687                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1688
1689                /* Release the DMA RX socket buffers */
1690                dma_free_rx_skbufs(priv, queue);
1691
1692                /* Free DMA regions of consistent memory previously allocated */
1693                if (!priv->extend_desc)
1694                        dma_free_coherent(priv->device, priv->dma_rx_size *
1695                                          sizeof(struct dma_desc),
1696                                          rx_q->dma_rx, rx_q->dma_rx_phy);
1697                else
1698                        dma_free_coherent(priv->device, priv->dma_rx_size *
1699                                          sizeof(struct dma_extended_desc),
1700                                          rx_q->dma_erx, rx_q->dma_rx_phy);
1701
1702                kfree(rx_q->buf_pool);
1703                if (rx_q->page_pool)
1704                        page_pool_destroy(rx_q->page_pool);
1705        }
1706}
1707
1708/**
1709 * free_dma_tx_desc_resources - free TX dma desc resources
1710 * @priv: private structure
1711 */
1712static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1713{
1714        u32 tx_count = priv->plat->tx_queues_to_use;
1715        u32 queue;
1716
1717        /* Free TX queue resources */
1718        for (queue = 0; queue < tx_count; queue++) {
1719                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1720                size_t size;
1721                void *addr;
1722
1723                /* Release the DMA TX socket buffers */
1724                dma_free_tx_skbufs(priv, queue);
1725
1726                if (priv->extend_desc) {
1727                        size = sizeof(struct dma_extended_desc);
1728                        addr = tx_q->dma_etx;
1729                } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1730                        size = sizeof(struct dma_edesc);
1731                        addr = tx_q->dma_entx;
1732                } else {
1733                        size = sizeof(struct dma_desc);
1734                        addr = tx_q->dma_tx;
1735                }
1736
1737                size *= priv->dma_tx_size;
1738
1739                dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1740
1741                kfree(tx_q->tx_skbuff_dma);
1742                kfree(tx_q->tx_skbuff);
1743        }
1744}
1745
1746/**
1747 * alloc_dma_rx_desc_resources - alloc RX resources.
1748 * @priv: private structure
1749 * Description: according to which descriptor type is in use (extended or
1750 * basic), this function allocates the RX path resources: the descriptor
1751 * ring and the page-pool backed receive buffers, pre-allocated in order
1752 * to allow a zero-copy mechanism.
1753 */
1754static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1755{
1756        u32 rx_count = priv->plat->rx_queues_to_use;
1757        int ret = -ENOMEM;
1758        u32 queue;
1759
1760        /* RX queues buffers and DMA */
1761        for (queue = 0; queue < rx_count; queue++) {
1762                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1763                struct page_pool_params pp_params = { 0 };
1764                unsigned int num_pages;
1765
1766                rx_q->queue_index = queue;
1767                rx_q->priv_data = priv;
1768
1769                pp_params.flags = PP_FLAG_DMA_MAP;
1770                pp_params.pool_size = priv->dma_rx_size;
1771                num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
1772                pp_params.order = ilog2(num_pages);
1773                pp_params.nid = dev_to_node(priv->device);
1774                pp_params.dev = priv->device;
1775                pp_params.dma_dir = DMA_FROM_DEVICE;
1776
1777                rx_q->page_pool = page_pool_create(&pp_params);
1778                if (IS_ERR(rx_q->page_pool)) {
1779                        ret = PTR_ERR(rx_q->page_pool);
1780                        rx_q->page_pool = NULL;
1781                        goto err_dma;
1782                }
1783
1784                rx_q->buf_pool = kcalloc(priv->dma_rx_size,
1785                                         sizeof(*rx_q->buf_pool),
1786                                         GFP_KERNEL);
1787                if (!rx_q->buf_pool)
1788                        goto err_dma;
1789
1790                if (priv->extend_desc) {
1791                        rx_q->dma_erx = dma_alloc_coherent(priv->device,
1792                                                           priv->dma_rx_size *
1793                                                           sizeof(struct dma_extended_desc),
1794                                                           &rx_q->dma_rx_phy,
1795                                                           GFP_KERNEL);
1796                        if (!rx_q->dma_erx)
1797                                goto err_dma;
1798
1799                } else {
1800                        rx_q->dma_rx = dma_alloc_coherent(priv->device,
1801                                                          priv->dma_rx_size *
1802                                                          sizeof(struct dma_desc),
1803                                                          &rx_q->dma_rx_phy,
1804                                                          GFP_KERNEL);
1805                        if (!rx_q->dma_rx)
1806                                goto err_dma;
1807                }
1808        }
1809
1810        return 0;
1811
1812err_dma:
1813        free_dma_rx_desc_resources(priv);
1814
1815        return ret;
1816}
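
/* Illustrative sketch, not driver code: the page_pool order is chosen so
 * that a single pool page holds one whole DMA buffer. E.g. with 4 KiB
 * pages and dma_buf_sz == BUF_SIZE_8KiB (assuming the usual 8 KiB value):
 *
 *	num_pages = DIV_ROUND_UP(8192, 4096);	// 2
 *	order	  = ilog2(2);			// 1, i.e. 8 KiB pool pages
 */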
1817
1818/**
1819 * alloc_dma_tx_desc_resources - alloc TX resources.
1820 * @priv: private structure
1821 * Description: according to which descriptor type is in use (extended,
1822 * basic or enhanced for TBS), this function allocates the TX path
1823 * resources: the DMA descriptor ring and the per-entry skbuff
1824 * bookkeeping arrays used during transmission.
1825 */
1826static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1827{
1828        u32 tx_count = priv->plat->tx_queues_to_use;
1829        int ret = -ENOMEM;
1830        u32 queue;
1831
1832        /* TX queues buffers and DMA */
1833        for (queue = 0; queue < tx_count; queue++) {
1834                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1835                size_t size;
1836                void *addr;
1837
1838                tx_q->queue_index = queue;
1839                tx_q->priv_data = priv;
1840
1841                tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
1842                                              sizeof(*tx_q->tx_skbuff_dma),
1843                                              GFP_KERNEL);
1844                if (!tx_q->tx_skbuff_dma)
1845                        goto err_dma;
1846
1847                tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
1848                                          sizeof(struct sk_buff *),
1849                                          GFP_KERNEL);
1850                if (!tx_q->tx_skbuff)
1851                        goto err_dma;
1852
1853                if (priv->extend_desc)
1854                        size = sizeof(struct dma_extended_desc);
1855                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1856                        size = sizeof(struct dma_edesc);
1857                else
1858                        size = sizeof(struct dma_desc);
1859
1860                size *= priv->dma_tx_size;
1861
1862                addr = dma_alloc_coherent(priv->device, size,
1863                                          &tx_q->dma_tx_phy, GFP_KERNEL);
1864                if (!addr)
1865                        goto err_dma;
1866
1867                if (priv->extend_desc)
1868                        tx_q->dma_etx = addr;
1869                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1870                        tx_q->dma_entx = addr;
1871                else
1872                        tx_q->dma_tx = addr;
1873        }
1874
1875        return 0;
1876
1877err_dma:
1878        free_dma_tx_desc_resources(priv);
1879        return ret;
1880}
1881
1882/**
1883 * alloc_dma_desc_resources - alloc TX/RX resources.
1884 * @priv: private structure
1885 * Description: according to which descriptor type is in use (extended or
1886 * basic), this function allocates the resources for the TX and RX paths.
1887 * For reception, for example, it pre-allocates the RX buffers in order
1888 * to allow a zero-copy mechanism.
1889 */
1890static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1891{
1892        /* RX Allocation */
1893        int ret = alloc_dma_rx_desc_resources(priv);
1894
1895        if (ret)
1896                return ret;
1897
1898        ret = alloc_dma_tx_desc_resources(priv);
1899
1900        return ret;
1901}
1902
1903/**
1904 * free_dma_desc_resources - free dma desc resources
1905 * @priv: private structure
1906 */
1907static void free_dma_desc_resources(struct stmmac_priv *priv)
1908{
1909        /* Release the DMA RX socket buffers */
1910        free_dma_rx_desc_resources(priv);
1911
1912        /* Release the DMA TX socket buffers */
1913        free_dma_tx_desc_resources(priv);
1914}
1915
1916/**
1917 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
1918 *  @priv: driver private structure
1919 *  Description: It is used for enabling the rx queues in the MAC
1920 */
1921static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1922{
1923        u32 rx_queues_count = priv->plat->rx_queues_to_use;
1924        int queue;
1925        u8 mode;
1926
1927        for (queue = 0; queue < rx_queues_count; queue++) {
1928                mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1929                stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
1930        }
1931}
1932
1933/**
1934 * stmmac_start_rx_dma - start RX DMA channel
1935 * @priv: driver private structure
1936 * @chan: RX channel index
1937 * Description:
1938 * This starts an RX DMA channel
1939 */
1940static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1941{
1942        netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1943        stmmac_start_rx(priv, priv->ioaddr, chan);
1944}
1945
1946/**
1947 * stmmac_start_tx_dma - start TX DMA channel
1948 * @priv: driver private structure
1949 * @chan: TX channel index
1950 * Description:
1951 * This starts a TX DMA channel
1952 */
1953static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1954{
1955        netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1956        stmmac_start_tx(priv, priv->ioaddr, chan);
1957}
1958
1959/**
1960 * stmmac_stop_rx_dma - stop RX DMA channel
1961 * @priv: driver private structure
1962 * @chan: RX channel index
1963 * Description:
1964 * This stops an RX DMA channel
1965 */
1966static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1967{
1968        netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1969        stmmac_stop_rx(priv, priv->ioaddr, chan);
1970}
1971
1972/**
1973 * stmmac_stop_tx_dma - stop TX DMA channel
1974 * @priv: driver private structure
1975 * @chan: TX channel index
1976 * Description:
1977 * This stops a TX DMA channel
1978 */
1979static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1980{
1981        netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1982        stmmac_stop_tx(priv, priv->ioaddr, chan);
1983}
1984
1985/**
1986 * stmmac_start_all_dma - start all RX and TX DMA channels
1987 * @priv: driver private structure
1988 * Description:
1989 * This starts all the RX and TX DMA channels
1990 */
1991static void stmmac_start_all_dma(struct stmmac_priv *priv)
1992{
1993        u32 rx_channels_count = priv->plat->rx_queues_to_use;
1994        u32 tx_channels_count = priv->plat->tx_queues_to_use;
1995        u32 chan = 0;
1996
1997        for (chan = 0; chan < rx_channels_count; chan++)
1998                stmmac_start_rx_dma(priv, chan);
1999
2000        for (chan = 0; chan < tx_channels_count; chan++)
2001                stmmac_start_tx_dma(priv, chan);
2002}
2003
2004/**
2005 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2006 * @priv: driver private structure
2007 * Description:
2008 * This stops the RX and TX DMA channels
2009 */
2010static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2011{
2012        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2013        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2014        u32 chan = 0;
2015
2016        for (chan = 0; chan < rx_channels_count; chan++)
2017                stmmac_stop_rx_dma(priv, chan);
2018
2019        for (chan = 0; chan < tx_channels_count; chan++)
2020                stmmac_stop_tx_dma(priv, chan);
2021}
2022
2023/**
2024 *  stmmac_dma_operation_mode - HW DMA operation mode
2025 *  @priv: driver private structure
2026 *  Description: it is used for configuring the DMA operation mode register in
2027 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2028 */
2029static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2030{
2031        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2032        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2033        int rxfifosz = priv->plat->rx_fifo_size;
2034        int txfifosz = priv->plat->tx_fifo_size;
2035        u32 txmode = 0;
2036        u32 rxmode = 0;
2037        u32 chan = 0;
2038        u8 qmode = 0;
2039
2040        if (rxfifosz == 0)
2041                rxfifosz = priv->dma_cap.rx_fifo_size;
2042        if (txfifosz == 0)
2043                txfifosz = priv->dma_cap.tx_fifo_size;
2044
2045        /* Adjust for real per queue fifo size */
2046        rxfifosz /= rx_channels_count;
2047        txfifosz /= tx_channels_count;
2048
2049        if (priv->plat->force_thresh_dma_mode) {
2050                txmode = tc;
2051                rxmode = tc;
2052        } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2053                /*
2054                 * In case of GMAC, SF mode can be enabled
2055                 * to perform the TX COE in HW. This depends on:
2056                 * 1) TX COE, if actually supported
2057                 * 2) no buggy Jumbo frame support that would require
2058                 *    not inserting the csum in the TDES.
2059                 */
2060                txmode = SF_DMA_MODE;
2061                rxmode = SF_DMA_MODE;
2062                priv->xstats.threshold = SF_DMA_MODE;
2063        } else {
2064                txmode = tc;
2065                rxmode = SF_DMA_MODE;
2066        }
2067
2068        /* configure all channels */
2069        for (chan = 0; chan < rx_channels_count; chan++) {
2070                qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2071
2072                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2073                                rxfifosz, qmode);
2074                stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
2075                                chan);
2076        }
2077
2078        for (chan = 0; chan < tx_channels_count; chan++) {
2079                qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2080
2081                stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2082                                txfifosz, qmode);
2083        }
2084}
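
/* Summary of the mode selection above:
 *
 *	force_thresh_dma_mode:		TX = tc,	  RX = tc
 *	force_sf_dma_mode or tx_coe:	TX = SF_DMA_MODE, RX = SF_DMA_MODE
 *	otherwise:			TX = tc,	  RX = SF_DMA_MODE
 */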
2085
2086/**
2087 * stmmac_tx_clean - to manage the transmission completion
2088 * @priv: driver private structure
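 * @budget: napi budget limiting this function's packet handling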
2089 * @queue: TX queue index
2090 * Description: it reclaims the transmit resources after transmission completes.
2091 */
2092static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
2093{
2094        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2095        unsigned int bytes_compl = 0, pkts_compl = 0;
2096        unsigned int entry, count = 0;
2097
2098        __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2099
2100        priv->xstats.tx_clean++;
2101
2102        entry = tx_q->dirty_tx;
2103        while ((entry != tx_q->cur_tx) && (count < budget)) {
2104                struct sk_buff *skb = tx_q->tx_skbuff[entry];
2105                struct dma_desc *p;
2106                int status;
2107
2108                if (priv->extend_desc)
2109                        p = (struct dma_desc *)(tx_q->dma_etx + entry);
2110                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2111                        p = &tx_q->dma_entx[entry].basic;
2112                else
2113                        p = tx_q->dma_tx + entry;
2114
2115                status = stmmac_tx_status(priv, &priv->dev->stats,
2116                                &priv->xstats, p, priv->ioaddr);
2117                /* Check if the descriptor is owned by the DMA */
2118                if (unlikely(status & tx_dma_own))
2119                        break;
2120
2121                count++;
2122
2123                /* Make sure descriptor fields are read after reading
2124                 * the own bit.
2125                 */
2126                dma_rmb();
2127
2128                /* Just consider the last segment and ... */
2129                if (likely(!(status & tx_not_ls))) {
2130                        /* ... verify the status error condition */
2131                        if (unlikely(status & tx_err)) {
2132                                priv->dev->stats.tx_errors++;
2133                        } else {
2134                                priv->dev->stats.tx_packets++;
2135                                priv->xstats.tx_pkt_n++;
2136                        }
2137                        stmmac_get_tx_hwtstamp(priv, p, skb);
2138                }
2139
2140                if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
2141                        if (tx_q->tx_skbuff_dma[entry].map_as_page)
2142                                dma_unmap_page(priv->device,
2143                                               tx_q->tx_skbuff_dma[entry].buf,
2144                                               tx_q->tx_skbuff_dma[entry].len,
2145                                               DMA_TO_DEVICE);
2146                        else
2147                                dma_unmap_single(priv->device,
2148                                                 tx_q->tx_skbuff_dma[entry].buf,
2149                                                 tx_q->tx_skbuff_dma[entry].len,
2150                                                 DMA_TO_DEVICE);
2151                        tx_q->tx_skbuff_dma[entry].buf = 0;
2152                        tx_q->tx_skbuff_dma[entry].len = 0;
2153                        tx_q->tx_skbuff_dma[entry].map_as_page = false;
2154                }
2155
2156                stmmac_clean_desc3(priv, tx_q, p);
2157
2158                tx_q->tx_skbuff_dma[entry].last_segment = false;
2159                tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2160
2161                if (likely(skb != NULL)) {
2162                        pkts_compl++;
2163                        bytes_compl += skb->len;
2164                        dev_consume_skb_any(skb);
2165                        tx_q->tx_skbuff[entry] = NULL;
2166                }
2167
2168                stmmac_release_tx_desc(priv, p, priv->mode);
2169
2170                entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
2171        }
2172        tx_q->dirty_tx = entry;
2173
2174        netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2175                                  pkts_compl, bytes_compl);
2176
2177        if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2178                                                                queue))) &&
2179            stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2180
2181                netif_dbg(priv, tx_done, priv->dev,
2182                          "%s: restart transmit\n", __func__);
2183                netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2184        }
2185
2186        if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2187            priv->eee_sw_timer_en) {
2188                stmmac_enable_eee_mode(priv);
2189                mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2190        }
2191
2192        /* We still have pending packets, let's call for a new scheduling */
2193        if (tx_q->dirty_tx != tx_q->cur_tx)
2194                hrtimer_start(&tx_q->txtimer,
2195                              STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2196                              HRTIMER_MODE_REL);
2197
2198        __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2199
2200        return count;
2201}
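
/* Note: the walk above consumes the ring between dirty_tx (oldest
 * in-flight entry) and cur_tx (next free slot); STMMAC_GET_ENTRY()
 * advances an index modulo the ring size, so cleaning stops at cur_tx,
 * at the napi budget, or at the first descriptor still owned by the DMA,
 * whichever comes first.
 */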
2202
2203/**
2204 * stmmac_tx_err - to manage the tx error
2205 * @priv: driver private structure
2206 * @chan: channel index
2207 * Description: it cleans the descriptors and restarts the transmission
2208 * in case of transmission errors.
2209 */
2210static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2211{
2212        struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2213
2214        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2215
2216        stmmac_stop_tx_dma(priv, chan);
2217        dma_free_tx_skbufs(priv, chan);
2218        stmmac_clear_tx_descriptors(priv, chan);
2219        tx_q->dirty_tx = 0;
2220        tx_q->cur_tx = 0;
2221        tx_q->mss = 0;
2222        netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
2223        stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2224                            tx_q->dma_tx_phy, chan);
2225        stmmac_start_tx_dma(priv, chan);
2226
2227        priv->dev->stats.tx_errors++;
2228        netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2229}
2230
2231/**
2232 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2233 *  @priv: driver private structure
2234 *  @txmode: TX operating mode
2235 *  @rxmode: RX operating mode
2236 *  @chan: channel index
2237 *  Description: it is used for configuring of the DMA operation mode in
2238 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2239 *  mode.
2240 */
2241static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2242                                          u32 rxmode, u32 chan)
2243{
2244        u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2245        u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2246        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2247        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2248        int rxfifosz = priv->plat->rx_fifo_size;
2249        int txfifosz = priv->plat->tx_fifo_size;
2250
2251        if (rxfifosz == 0)
2252                rxfifosz = priv->dma_cap.rx_fifo_size;
2253        if (txfifosz == 0)
2254                txfifosz = priv->dma_cap.tx_fifo_size;
2255
2256        /* Adjust for real per queue fifo size */
2257        rxfifosz /= rx_channels_count;
2258        txfifosz /= tx_channels_count;
2259
2260        stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2261        stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2262}
2263
2264static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2265{
2266        int ret;
2267
2268        ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2269                        priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2270        if (ret && (ret != -EINVAL)) {
2271                stmmac_global_err(priv);
2272                return true;
2273        }
2274
2275        return false;
2276}
2277
2278static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2279{
2280        int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2281                                                 &priv->xstats, chan, dir);
2282        struct stmmac_channel *ch = &priv->channel[chan];
2283        unsigned long flags;
2284
2285        if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2286                if (napi_schedule_prep(&ch->rx_napi)) {
2287                        spin_lock_irqsave(&ch->lock, flags);
2288                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2289                        spin_unlock_irqrestore(&ch->lock, flags);
2290                        __napi_schedule(&ch->rx_napi);
2291                }
2292        }
2293
2294        if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2295                if (napi_schedule_prep(&ch->tx_napi)) {
2296                        spin_lock_irqsave(&ch->lock, flags);
2297                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2298                        spin_unlock_irqrestore(&ch->lock, flags);
2299                        __napi_schedule(&ch->tx_napi);
2300                }
2301        }
2302
2303        return status;
2304}
2305
2306/**
2307 * stmmac_dma_interrupt - DMA ISR
2308 * @priv: driver private structure
2309 * Description: this is the DMA ISR. It is called by the main ISR.
2310 * It calls the dwmac dma routine and schedules the NAPI poll method
2311 * if there is work to be done.
2312 */
2313static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2314{
2315        u32 tx_channel_count = priv->plat->tx_queues_to_use;
2316        u32 rx_channel_count = priv->plat->rx_queues_to_use;
2317        u32 channels_to_check = tx_channel_count > rx_channel_count ?
2318                                tx_channel_count : rx_channel_count;
2319        u32 chan;
2320        int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2321
2322        /* Make sure we never check beyond our status buffer. */
2323        if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2324                channels_to_check = ARRAY_SIZE(status);
2325
2326        for (chan = 0; chan < channels_to_check; chan++)
2327                status[chan] = stmmac_napi_check(priv, chan,
2328                                                 DMA_DIR_RXTX);
2329
2330        for (chan = 0; chan < tx_channel_count; chan++) {
2331                if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2332                        /* Try to bump up the dma threshold on this failure */
2333                        if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2334                            (tc <= 256)) {
2335                                tc += 64;
2336                                if (priv->plat->force_thresh_dma_mode)
2337                                        stmmac_set_dma_operation_mode(priv,
2338                                                                      tc,
2339                                                                      tc,
2340                                                                      chan);
2341                                else
2342                                        stmmac_set_dma_operation_mode(priv,
2343                                                                    tc,
2344                                                                    SF_DMA_MODE,
2345                                                                    chan);
2346                                priv->xstats.threshold = tc;
2347                        }
2348                } else if (unlikely(status[chan] == tx_hard_error)) {
2349                        stmmac_tx_err(priv, chan);
2350                }
2351        }
2352}
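
/* Note: on tx_hard_error_bump_tc the threshold is raised in steps of 64
 * (while tc is still <= 256) and the channel's operation mode is simply
 * reprogrammed; only a plain tx_hard_error triggers the full
 * stmmac_tx_err() ring recovery.
 */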
2353
2354/**
2355 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2356 * @priv: driver private structure
2357 * Description: this masks the MMC irq; the counters are in fact managed in SW.
2358 */
2359static void stmmac_mmc_setup(struct stmmac_priv *priv)
2360{
2361        unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2362                            MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2363
2364        stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2365
2366        if (priv->dma_cap.rmon) {
2367                stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2368                memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2369        } else
2370                netdev_info(priv->dev, "No MAC Management Counters available\n");
2371}
2372
2373/**
2374 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2375 * @priv: driver private structure
2376 * Description:
2377 *  newer GMAC chip generations have a dedicated register to indicate
2378 *  the presence of optional features/functions.
2379 *  This can also be used to override the values passed through the
2380 *  platform, which is necessary for old MAC10/100 and GMAC chips.
2381 */
2382static int stmmac_get_hw_features(struct stmmac_priv *priv)
2383{
2384        return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2385}
2386
2387/**
2388 * stmmac_check_ether_addr - check if the MAC addr is valid
2389 * @priv: driver private structure
2390 * Description:
2391 * it verifies that the MAC address is valid; if it is not, a random
2392 * MAC address is generated
2393 */
2394static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2395{
2396        if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2397                stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
2398                if (!is_valid_ether_addr(priv->dev->dev_addr))
2399                        eth_hw_addr_random(priv->dev);
2400                dev_info(priv->device, "device MAC address %pM\n",
2401                         priv->dev->dev_addr);
2402        }
2403}
2404
2405/**
2406 * stmmac_init_dma_engine - DMA init.
2407 * @priv: driver private structure
2408 * Description:
2409 * It initializes the DMA by invoking the specific MAC/GMAC callback.
2410 * Some DMA parameters can be passed from the platform;
2411 * if they are not passed, a default is kept for the MAC or GMAC.
2412 */
2413static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2414{
2415        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2416        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2417        u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2418        struct stmmac_rx_queue *rx_q;
2419        struct stmmac_tx_queue *tx_q;
2420        u32 chan = 0;
2421        int atds = 0;
2422        int ret = 0;
2423
2424        if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2425                dev_err(priv->device, "Invalid DMA configuration\n");
2426                return -EINVAL;
2427        }
2428
2429        if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2430                atds = 1;
2431
2432        ret = stmmac_reset(priv, priv->ioaddr);
2433        if (ret) {
2434                dev_err(priv->device, "Failed to reset the dma\n");
2435                return ret;
2436        }
2437
2438        /* DMA Configuration */
2439        stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2440
2441        if (priv->plat->axi)
2442                stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2443
2444        /* DMA CSR Channel configuration */
2445        for (chan = 0; chan < dma_csr_ch; chan++)
2446                stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
2447
2448        /* DMA RX Channel Configuration */
2449        for (chan = 0; chan < rx_channels_count; chan++) {
2450                rx_q = &priv->rx_queue[chan];
2451
2452                stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2453                                    rx_q->dma_rx_phy, chan);
2454
2455                rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2456                                     (priv->dma_rx_size *
2457                                      sizeof(struct dma_desc));
2458                stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2459                                       rx_q->rx_tail_addr, chan);
2460        }
2461
2462        /* DMA TX Channel Configuration */
2463        for (chan = 0; chan < tx_channels_count; chan++) {
2464                tx_q = &priv->tx_queue[chan];
2465
2466                stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2467                                    tx_q->dma_tx_phy, chan);
2468
2469                tx_q->tx_tail_addr = tx_q->dma_tx_phy;
2470                stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2471                                       tx_q->tx_tail_addr, chan);
2472        }
2473
2474        return ret;
2475}
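
/* Note: the RX tail pointer is programmed one descriptor past the end of
 * the ring, handing every RX descriptor to the DMA, while the TX tail
 * starts at the ring base since there is nothing to transmit yet.
 */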
2476
2477static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2478{
2479        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2480
2481        hrtimer_start(&tx_q->txtimer,
2482                      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
2483                      HRTIMER_MODE_REL);
2484}
2485
2486/**
2487 * stmmac_tx_timer - mitigation sw timer for tx.
2488 * @t: pointer to the hrtimer embedded in the TX queue structure
2489 * Description:
2490 * This is the timer handler that kicks the TX NAPI poll, which runs stmmac_tx_clean.
2491 */
2492static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
2493{
2494        struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
2495        struct stmmac_priv *priv = tx_q->priv_data;
2496        struct stmmac_channel *ch;
2497
2498        ch = &priv->channel[tx_q->queue_index];
2499
2500        if (likely(napi_schedule_prep(&ch->tx_napi))) {
2501                unsigned long flags;
2502
2503                spin_lock_irqsave(&ch->lock, flags);
2504                stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2505                spin_unlock_irqrestore(&ch->lock, flags);
2506                __napi_schedule(&ch->tx_napi);
2507        }
2508
2509        return HRTIMER_NORESTART;
2510}
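
/* Note: the timer is strictly one-shot (HRTIMER_NORESTART above); it is
 * re-armed explicitly, either via stmmac_tx_timer_arm() or from
 * stmmac_tx_clean() while packets are still pending.
 */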
2511
2512/**
2513 * stmmac_init_coalesce - init mitigation options.
2514 * @priv: driver private structure
2515 * Description:
2516 * This inits the coalesce parameters: i.e. timer rate,
2517 * timer handler and default threshold used for enabling the
2518 * interrupt on completion bit.
2519 */
2520static void stmmac_init_coalesce(struct stmmac_priv *priv)
2521{
2522        u32 tx_channel_count = priv->plat->tx_queues_to_use;
2523        u32 rx_channel_count = priv->plat->rx_queues_to_use;
2524        u32 chan;
2525
2526        for (chan = 0; chan < tx_channel_count; chan++) {
2527                struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2528
2529                priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
2530                priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
2531
2532                hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2533                tx_q->txtimer.function = stmmac_tx_timer;
2534        }
2535
2536        for (chan = 0; chan < rx_channel_count; chan++)
2537                priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
2538}
2539
2540static void stmmac_set_rings_length(struct stmmac_priv *priv)
2541{
2542        u32 rx_channels_count = priv->plat->rx_queues_to_use;
2543        u32 tx_channels_count = priv->plat->tx_queues_to_use;
2544        u32 chan;
2545
2546        /* set TX ring length */
2547        for (chan = 0; chan < tx_channels_count; chan++)
2548                stmmac_set_tx_ring_len(priv, priv->ioaddr,
2549                                       (priv->dma_tx_size - 1), chan);
2550
2551        /* set RX ring length */
2552        for (chan = 0; chan < rx_channels_count; chan++)
2553                stmmac_set_rx_ring_len(priv, priv->ioaddr,
2554                                       (priv->dma_rx_size - 1), chan);
2555}
2556
2557/**
2558 *  stmmac_set_tx_queue_weight - Set TX queue weight
2559 *  @priv: driver private structure
2560 *  Description: It is used for setting TX queues weight
2561 */
2562static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2563{
2564        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2565        u32 weight;
2566        u32 queue;
2567
2568        for (queue = 0; queue < tx_queues_count; queue++) {
2569                weight = priv->plat->tx_queues_cfg[queue].weight;
2570                stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
2571        }
2572}
2573
2574/**
2575 *  stmmac_configure_cbs - Configure CBS in TX queue
2576 *  @priv: driver private structure
2577 *  Description: It is used for configuring CBS in AVB TX queues
2578 */
2579static void stmmac_configure_cbs(struct stmmac_priv *priv)
2580{
2581        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2582        u32 mode_to_use;
2583        u32 queue;
2584
2585        /* queue 0 is reserved for legacy traffic */
2586        for (queue = 1; queue < tx_queues_count; queue++) {
2587                mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2588                if (mode_to_use == MTL_QUEUE_DCB)
2589                        continue;
2590
2591                stmmac_config_cbs(priv, priv->hw,
2592                                priv->plat->tx_queues_cfg[queue].send_slope,
2593                                priv->plat->tx_queues_cfg[queue].idle_slope,
2594                                priv->plat->tx_queues_cfg[queue].high_credit,
2595                                priv->plat->tx_queues_cfg[queue].low_credit,
2596                                queue);
2597        }
2598}
2599
2600/**
2601 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2602 *  @priv: driver private structure
2603 *  Description: It is used for mapping RX queues to RX dma channels
2604 */
2605static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2606{
2607        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2608        u32 queue;
2609        u32 chan;
2610
2611        for (queue = 0; queue < rx_queues_count; queue++) {
2612                chan = priv->plat->rx_queues_cfg[queue].chan;
2613                stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
2614        }
2615}
2616
2617/**
2618 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2619 *  @priv: driver private structure
2620 *  Description: It is used for configuring the RX Queue Priority
2621 */
2622static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2623{
2624        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2625        u32 queue;
2626        u32 prio;
2627
2628        for (queue = 0; queue < rx_queues_count; queue++) {
2629                if (!priv->plat->rx_queues_cfg[queue].use_prio)
2630                        continue;
2631
2632                prio = priv->plat->rx_queues_cfg[queue].prio;
2633                stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
2634        }
2635}
2636
2637/**
2638 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2639 *  @priv: driver private structure
2640 *  Description: It is used for configuring the TX Queue Priority
2641 */
2642static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2643{
2644        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2645        u32 queue;
2646        u32 prio;
2647
2648        for (queue = 0; queue < tx_queues_count; queue++) {
2649                if (!priv->plat->tx_queues_cfg[queue].use_prio)
2650                        continue;
2651
2652                prio = priv->plat->tx_queues_cfg[queue].prio;
2653                stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
2654        }
2655}
2656
2657/**
2658 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2659 *  @priv: driver private structure
2660 *  Description: It is used for configuring the RX queue routing
2661 */
2662static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2663{
2664        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2665        u32 queue;
2666        u8 packet;
2667
2668        for (queue = 0; queue < rx_queues_count; queue++) {
2669                /* no specific packet type routing specified for the queue */
2670                if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2671                        continue;
2672
2673                packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2674                stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
2675        }
2676}
2677
2678static void stmmac_mac_config_rss(struct stmmac_priv *priv)
2679{
2680        if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
2681                priv->rss.enable = false;
2682                return;
2683        }
2684
2685        if (priv->dev->features & NETIF_F_RXHASH)
2686                priv->rss.enable = true;
2687        else
2688                priv->rss.enable = false;
2689
2690        stmmac_rss_configure(priv, priv->hw, &priv->rss,
2691                             priv->plat->rx_queues_to_use);
2692}
2693
2694/**
2695 *  stmmac_mtl_configuration - Configure MTL
2696 *  @priv: driver private structure
2697 *  Description: It is used for configuring MTL
2698 */
2699static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2700{
2701        u32 rx_queues_count = priv->plat->rx_queues_to_use;
2702        u32 tx_queues_count = priv->plat->tx_queues_to_use;
2703
2704        if (tx_queues_count > 1)
2705                stmmac_set_tx_queue_weight(priv);
2706
2707        /* Configure MTL RX algorithms */
2708        if (rx_queues_count > 1)
2709                stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2710                                priv->plat->rx_sched_algorithm);
2711
2712        /* Configure MTL TX algorithms */
2713        if (tx_queues_count > 1)
2714                stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2715                                priv->plat->tx_sched_algorithm);
2716
2717        /* Configure CBS in AVB TX queues */
2718        if (tx_queues_count > 1)
2719                stmmac_configure_cbs(priv);
2720
2721        /* Map RX MTL to DMA channels */
2722        stmmac_rx_queue_dma_chan_map(priv);
2723
2724        /* Enable MAC RX Queues */
2725        stmmac_mac_enable_rx_queues(priv);
2726
2727        /* Set RX priorities */
2728        if (rx_queues_count > 1)
2729                stmmac_mac_config_rx_queues_prio(priv);
2730
2731        /* Set TX priorities */
2732        if (tx_queues_count > 1)
2733                stmmac_mac_config_tx_queues_prio(priv);
2734
2735        /* Set RX routing */
2736        if (rx_queues_count > 1)
2737                stmmac_mac_config_rx_queues_routing(priv);
2738
2739        /* Receive Side Scaling */
2740        if (rx_queues_count > 1)
2741                stmmac_mac_config_rss(priv);
2742}
2743
2744static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2745{
2746        if (priv->dma_cap.asp) {
2747                netdev_info(priv->dev, "Enabling Safety Features\n");
2748                stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
2749        } else {
2750                netdev_info(priv->dev, "No Safety Features support found\n");
2751        }
2752}
2753
2754static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
2755{
2756        char *name;
2757
2758        clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
2759        clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
2760
2761        name = priv->wq_name;
2762        sprintf(name, "%s-fpe", priv->dev->name);
2763
2764        priv->fpe_wq = create_singlethread_workqueue(name);
2765        if (!priv->fpe_wq) {
2766                netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
2767
2768                return -ENOMEM;
2769        }
2770        netdev_info(priv->dev, "FPE workqueue start\n");
2771
2772        return 0;
2773}
2774
2775/**
2776 * stmmac_hw_setup - setup mac in a usable state.
2777 *  @dev: pointer to the device structure.
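 *  @init_ptp: initialize PTP if set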
2778 *  Description:
2779 *  this is the main function used to set up the HW in a usable state:
2780 *  the dma engine is reset, the core registers are configured (e.g. AXI,
2781 *  checksum features, timers) and the DMA is made ready to start
2782 *  receiving and transmitting.
2783 *  Return value:
2784 *  0 on success and an appropriate (-)ve integer as defined in errno.h
2785 *  file on failure.
2786 */
2787static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2788{
2789        struct stmmac_priv *priv = netdev_priv(dev);
2790        u32 rx_cnt = priv->plat->rx_queues_to_use;
2791        u32 tx_cnt = priv->plat->tx_queues_to_use;
2792        bool sph_en;
2793        u32 chan;
2794        int ret;
2795
2796        /* DMA initialization and SW reset */
2797        ret = stmmac_init_dma_engine(priv);
2798        if (ret < 0) {
2799                netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2800                           __func__);
2801                return ret;
2802        }
2803
2804        /* Copy the MAC addr into the HW  */
2805        stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
2806
2807        /* PS and related bits will be programmed according to the speed */
2808        if (priv->hw->pcs) {
2809                int speed = priv->plat->mac_port_sel_speed;
2810
2811                if ((speed == SPEED_10) || (speed == SPEED_100) ||
2812                    (speed == SPEED_1000)) {
2813                        priv->hw->ps = speed;
2814                } else {
2815                        dev_warn(priv->device, "invalid port speed\n");
2816                        priv->hw->ps = 0;
2817                }
2818        }
2819
2820        /* Initialize the MAC Core */
2821        stmmac_core_init(priv, priv->hw, dev);
2822
2823        /* Initialize MTL */
2824        stmmac_mtl_configuration(priv);
2825
2826        /* Initialize Safety Features */
2827        stmmac_safety_feat_configuration(priv);
2828
2829        ret = stmmac_rx_ipc(priv, priv->hw);
2830        if (!ret) {
2831                netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2832                priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2833                priv->hw->rx_csum = 0;
2834        }
2835
2836        /* Enable the MAC Rx/Tx */
2837        stmmac_mac_set(priv, priv->ioaddr, true);
2838
2839        /* Set the HW DMA mode and the COE */
2840        stmmac_dma_operation_mode(priv);
2841
2842        stmmac_mmc_setup(priv);
2843
2844        if (init_ptp) {
2845                ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2846                if (ret < 0)
2847                        netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2848
2849                ret = stmmac_init_ptp(priv);
2850                if (ret == -EOPNOTSUPP)
2851                        netdev_warn(priv->dev, "PTP not supported by HW\n");
2852                else if (ret)
2853                        netdev_warn(priv->dev, "PTP init failed\n");
2854        }
2855
2856        priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
2857
2858        /* Convert the timer from msec to usec */
2859        if (!priv->tx_lpi_timer)
2860                priv->tx_lpi_timer = eee_timer * 1000;
2861
2862        if (priv->use_riwt) {
2863                u32 queue;
2864
2865                for (queue = 0; queue < rx_cnt; queue++) {
2866                        if (!priv->rx_riwt[queue])
2867                                priv->rx_riwt[queue] = DEF_DMA_RIWT;
2868
2869                        stmmac_rx_watchdog(priv, priv->ioaddr,
2870                                           priv->rx_riwt[queue], queue);
2871                }
2872        }
2873
2874        if (priv->hw->pcs)
2875                stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
2876
2877        /* set TX and RX rings length */
2878        stmmac_set_rings_length(priv);
2879
2880        /* Enable TSO */
2881        if (priv->tso) {
2882                for (chan = 0; chan < tx_cnt; chan++) {
2883                        struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2884
2885                        /* TSO and TBS cannot co-exist */
2886                        if (tx_q->tbs & STMMAC_TBS_AVAIL)
2887                                continue;
2888
2889                        stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
2890                }
2891        }
2892
2893        /* Enable Split Header */
2894        sph_en = (priv->hw->rx_csum > 0) && priv->sph;
2895        for (chan = 0; chan < rx_cnt; chan++)
2896                stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
2897
2898
2899        /* VLAN Tag Insertion */
2900        if (priv->dma_cap.vlins)
2901                stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
2902
2903        /* TBS */
2904        for (chan = 0; chan < tx_cnt; chan++) {
2905                struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2906                int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
2907
2908                stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
2909        }
2910
2911        /* Configure real RX and TX queues */
2912        netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
2913        netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
2914
2915        /* Start the ball rolling... */
2916        stmmac_start_all_dma(priv);
2917
2918        if (priv->dma_cap.fpesel) {
2919                stmmac_fpe_start_wq(priv);
2920
2921                if (priv->plat->fpe_cfg->enable)
2922                        stmmac_fpe_handshake(priv, true);
2923        }
2924
2925        return 0;
2926}
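
/* Editor's note: stmmac_hw_setup() assumes the descriptor rings already
 * exist; a caller (see stmmac_open() below) typically does, in order:
 *
 *	ret = alloc_dma_desc_resources(priv);
 *	ret = init_dma_desc_rings(dev, GFP_KERNEL);
 *	ret = stmmac_hw_setup(dev, true);
 */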
2927
2928static void stmmac_hw_teardown(struct net_device *dev)
2929{
2930        struct stmmac_priv *priv = netdev_priv(dev);
2931
2932        clk_disable_unprepare(priv->plat->clk_ptp_ref);
2933}
2934
2935static void stmmac_free_irq(struct net_device *dev,
2936                            enum request_irq_err irq_err, int irq_idx)
2937{
2938        struct stmmac_priv *priv = netdev_priv(dev);
2939        int j;
2940
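        /* The switch enters at the request that failed and falls through to
         * free every IRQ successfully requested before it, ending with the
         * MAC IRQ; REQ_IRQ_ERR_ALL releases everything.
         */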
2941        switch (irq_err) {
2942        case REQ_IRQ_ERR_ALL:
2943                irq_idx = priv->plat->tx_queues_to_use;
2944                fallthrough;
2945        case REQ_IRQ_ERR_TX:
2946                for (j = irq_idx - 1; j >= 0; j--) {
2947                        if (priv->tx_irq[j] > 0) {
2948                                irq_set_affinity_hint(priv->tx_irq[j], NULL);
2949                                free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
2950                        }
2951                }
2952                irq_idx = priv->plat->rx_queues_to_use;
2953                fallthrough;
2954        case REQ_IRQ_ERR_RX:
2955                for (j = irq_idx - 1; j >= 0; j--) {
2956                        if (priv->rx_irq[j] > 0) {
2957                                irq_set_affinity_hint(priv->rx_irq[j], NULL);
2958                                free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
2959                        }
2960                }
2961
2962                if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
2963                        free_irq(priv->sfty_ue_irq, dev);
2964                fallthrough;
2965        case REQ_IRQ_ERR_SFTY_UE:
2966                if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
2967                        free_irq(priv->sfty_ce_irq, dev);
2968                fallthrough;
2969        case REQ_IRQ_ERR_SFTY_CE:
2970                if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
2971                        free_irq(priv->lpi_irq, dev);
2972                fallthrough;
2973        case REQ_IRQ_ERR_LPI:
2974                if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
2975                        free_irq(priv->wol_irq, dev);
2976                fallthrough;
2977        case REQ_IRQ_ERR_WOL:
2978                free_irq(dev->irq, dev);
2979                fallthrough;
2980        case REQ_IRQ_ERR_MAC:
2981        case REQ_IRQ_ERR_NO:
2982                /* If the MAC IRQ request failed, there is nothing left to free */
2983                break;
2984        }
2985}
2986
2987static int stmmac_request_irq_multi_msi(struct net_device *dev)
2988{
2989        enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
2990        struct stmmac_priv *priv = netdev_priv(dev);
2991        cpumask_t cpu_mask;
2992        int irq_idx = 0;
2993        char *int_name;
2994        int ret;
2995        int i;
2996
2997        /* For common interrupt */
2998        int_name = priv->int_name_mac;
2999        sprintf(int_name, "%s:%s", dev->name, "mac");
3000        ret = request_irq(dev->irq, stmmac_mac_interrupt,
3001                          0, int_name, dev);
3002        if (unlikely(ret < 0)) {
3003                netdev_err(priv->dev,
3004                           "%s: alloc mac MSI %d (error: %d)\n",
3005                           __func__, dev->irq, ret);
3006                irq_err = REQ_IRQ_ERR_MAC;
3007                goto irq_error;
3008        }
3009
3010        /* Request the Wake IRQ in case a separate line
3011         * is used for WoL
3012         */
3013        if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3014                int_name = priv->int_name_wol;
3015                sprintf(int_name, "%s:%s", dev->name, "wol");
3016                ret = request_irq(priv->wol_irq,
3017                                  stmmac_mac_interrupt,
3018                                  0, int_name, dev);
3019                if (unlikely(ret < 0)) {
3020                        netdev_err(priv->dev,
3021                                   "%s: alloc wol MSI %d (error: %d)\n",
3022                                   __func__, priv->wol_irq, ret);
3023                        irq_err = REQ_IRQ_ERR_WOL;
3024                        goto irq_error;
3025                }
3026        }
3027
3028        /* Request the LPI IRQ in case a separate line
3029         * is used for LPI
3030         */
3031        if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3032                int_name = priv->int_name_lpi;
3033                sprintf(int_name, "%s:%s", dev->name, "lpi");
3034                ret = request_irq(priv->lpi_irq,
3035                                  stmmac_mac_interrupt,
3036                                  0, int_name, dev);
3037                if (unlikely(ret < 0)) {
3038                        netdev_err(priv->dev,
3039                                   "%s: alloc lpi MSI %d (error: %d)\n",
3040                                   __func__, priv->lpi_irq, ret);
3041                        irq_err = REQ_IRQ_ERR_LPI;
3042                        goto irq_error;
3043                }
3044        }
3045
3046        /* Request the Safety Feature Correctable Error line in
3047         * case a separate line is used
3048         */
3049        if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3050                int_name = priv->int_name_sfty_ce;
3051                sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3052                ret = request_irq(priv->sfty_ce_irq,
3053                                  stmmac_safety_interrupt,
3054                                  0, int_name, dev);
3055                if (unlikely(ret < 0)) {
3056                        netdev_err(priv->dev,
3057                                   "%s: alloc sfty ce MSI %d (error: %d)\n",
3058                                   __func__, priv->sfty_ce_irq, ret);
3059                        irq_err = REQ_IRQ_ERR_SFTY_CE;
3060                        goto irq_error;
3061                }
3062        }
3063
3064        /* Request the Safety Feature Uncorrectable Error line in
3065         * case a separate line is used
3066         */
3067        if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3068                int_name = priv->int_name_sfty_ue;
3069                sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3070                ret = request_irq(priv->sfty_ue_irq,
3071                                  stmmac_safety_interrupt,
3072                                  0, int_name, dev);
3073                if (unlikely(ret < 0)) {
3074                        netdev_err(priv->dev,
3075                                   "%s: alloc sfty ue MSI %d (error: %d)\n",
3076                                   __func__, priv->sfty_ue_irq, ret);
3077                        irq_err = REQ_IRQ_ERR_SFTY_UE;
3078                        goto irq_error;
3079                }
3080        }
3081
3082        /* Request Rx MSI irq */
3083        for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3084                if (priv->rx_irq[i] == 0)
3085                        continue;
3086
3087                int_name = priv->int_name_rx_irq[i];
3088                sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3089                ret = request_irq(priv->rx_irq[i],
3090                                  stmmac_msi_intr_rx,
3091                                  0, int_name, &priv->rx_queue[i]);
3092                if (unlikely(ret < 0)) {
3093                        netdev_err(priv->dev,
3094                                   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3095                                   __func__, i, priv->rx_irq[i], ret);
3096                        irq_err = REQ_IRQ_ERR_RX;
3097                        irq_idx = i;
3098                        goto irq_error;
3099                }
3100                cpumask_clear(&cpu_mask);
3101                cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3102                irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3103        }
3104
3105        /* Request Tx MSI irq */
3106        for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3107                if (priv->tx_irq[i] == 0)
3108                        continue;
3109
3110                int_name = priv->int_name_tx_irq[i];
3111                sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3112                ret = request_irq(priv->tx_irq[i],
3113                                  stmmac_msi_intr_tx,
3114                                  0, int_name, &priv->tx_queue[i]);
3115                if (unlikely(ret < 0)) {
3116                        netdev_err(priv->dev,
3117                                   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3118                                   __func__, i, priv->tx_irq[i], ret);
3119                        irq_err = REQ_IRQ_ERR_TX;
3120                        irq_idx = i;
3121                        goto irq_error;
3122                }
3123                cpumask_clear(&cpu_mask);
3124                cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3125                irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3126        }
3127
3128        return 0;
3129
3130irq_error:
3131        stmmac_free_irq(dev, irq_err, irq_idx);
3132        return ret;
3133}
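
/* Editor's note: the affinity hints above spread the per-queue vectors
 * round-robin over the online CPUs; e.g. with 4 CPUs online, rx-5 is
 * hinted to CPU 5 % 4 = 1. stmmac_free_irq() clears each hint with
 * irq_set_affinity_hint(irq, NULL) before freeing the line.
 */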
3134
3135static int stmmac_request_irq_single(struct net_device *dev)
3136{
3137        enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
3138        struct stmmac_priv *priv = netdev_priv(dev);
3139        int ret;
3140
3141        ret = request_irq(dev->irq, stmmac_interrupt,
3142                          IRQF_SHARED, dev->name, dev);
3143        if (unlikely(ret < 0)) {
3144                netdev_err(priv->dev,
3145                           "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3146                           __func__, dev->irq, ret);
3147                irq_err = REQ_IRQ_ERR_MAC;
3148                return ret;
3149        }
3150
3151        /* Request the Wake IRQ in case a separate line
3152         * is used for WoL
3153         */
3154        if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3155                ret = request_irq(priv->wol_irq, stmmac_interrupt,
3156                                  IRQF_SHARED, dev->name, dev);
3157                if (unlikely(ret < 0)) {
3158                        netdev_err(priv->dev,
3159                                   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3160                                   __func__, priv->wol_irq, ret);
3161                        irq_err = REQ_IRQ_ERR_WOL;
3162                        return ret;
3163                }
3164        }
3165
3166        /* Request the LPI IRQ in case a separate line is used for LPI */
3167        if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3168                ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3169                                  IRQF_SHARED, dev->name, dev);
3170                if (unlikely(ret < 0)) {
3171                        netdev_err(priv->dev,
3172                                   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3173                                   __func__, priv->lpi_irq, ret);
3174                        irq_err = REQ_IRQ_ERR_LPI;
3175                        goto irq_error;
3176                }
3177        }
3178
3179        return 0;
3180
3181irq_error:
3182        stmmac_free_irq(dev, irq_err, 0);
3183        return ret;
3184}
3185
3186static int stmmac_request_irq(struct net_device *dev)
3187{
3188        struct stmmac_priv *priv = netdev_priv(dev);
3189        int ret;
3190
3191        /* Request the IRQ lines */
3192        if (priv->plat->multi_msi_en)
3193                ret = stmmac_request_irq_multi_msi(dev);
3194        else
3195                ret = stmmac_request_irq_single(dev);
3196
3197        return ret;
3198}
3199
3200/**
3201 *  stmmac_open - open entry point of the driver
3202 *  @dev : pointer to the device structure.
3203 *  Description:
3204 *  This function is the open entry point of the driver.
3205 *  Return value:
3206 *  0 on success and an appropriate negative integer, as defined in
3207 *  errno.h, on failure.
3208 */
3209static int stmmac_open(struct net_device *dev)
3210{
3211        struct stmmac_priv *priv = netdev_priv(dev);
3212        int bfsize = 0;
3213        u32 chan;
3214        int ret;
3215
3216        ret = pm_runtime_get_sync(priv->device);
3217        if (ret < 0) {
3218                pm_runtime_put_noidle(priv->device);
3219                return ret;
3220        }
3221
3222        if (priv->hw->pcs != STMMAC_PCS_TBI &&
3223            priv->hw->pcs != STMMAC_PCS_RTBI &&
3224            priv->hw->xpcs_args.an_mode != DW_AN_C73) {
3225                ret = stmmac_init_phy(dev);
3226                if (ret) {
3227                        netdev_err(priv->dev,
3228                                   "%s: Cannot attach to PHY (error: %d)\n",
3229                                   __func__, ret);
3230                        goto init_phy_error;
3231                }
3232        }
3233
3234        /* Extra statistics */
3235        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3236        priv->xstats.threshold = tc;
3237
3238        bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3239        if (bfsize < 0)
3240                bfsize = 0;
3241
3242        if (bfsize < BUF_SIZE_16KiB)
3243                bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3244
3245        priv->dma_buf_sz = bfsize;
3246        buf_sz = bfsize;
3247
3248        priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3249
3250        if (!priv->dma_tx_size)
3251                priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3252        if (!priv->dma_rx_size)
3253                priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3254
3255        /* Check TBS availability early, before allocating TX descriptors */
3256        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3257                struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3258                int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3259
3260                /* Setup per-TXQ tbs flag before TX descriptor alloc */
3261                tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3262        }
3263
3264        ret = alloc_dma_desc_resources(priv);
3265        if (ret < 0) {
3266                netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3267                           __func__);
3268                goto dma_desc_error;
3269        }
3270
3271        ret = init_dma_desc_rings(dev, GFP_KERNEL);
3272        if (ret < 0) {
3273                netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3274                           __func__);
3275                goto init_error;
3276        }
3277
3278        ret = stmmac_hw_setup(dev, true);
3279        if (ret < 0) {
3280                netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3281                goto init_error;
3282        }
3283
3284        stmmac_init_coalesce(priv);
3285
3286        phylink_start(priv->phylink);
3287        /* We may have called phylink_speed_down before */
3288        phylink_speed_up(priv->phylink);
3289
3290        ret = stmmac_request_irq(dev);
3291        if (ret)
3292                goto irq_error;
3293
3294        stmmac_enable_all_queues(priv);
3295        netif_tx_start_all_queues(priv->dev);
3296
3297        return 0;
3298
3299irq_error:
3300        phylink_stop(priv->phylink);
3301
3302        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3303                hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3304
3305        stmmac_hw_teardown(dev);
3306init_error:
3307        free_dma_desc_resources(priv);
3308dma_desc_error:
3309        phylink_disconnect_phy(priv->phylink);
3310init_phy_error:
3311        pm_runtime_put(priv->device);
3312        return ret;
3313}
3314
3315static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3316{
3317        set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3318
3319        if (priv->fpe_wq)
3320                destroy_workqueue(priv->fpe_wq);
3321
3322        netdev_info(priv->dev, "FPE workqueue stop\n");
3323}
3324
3325/**
3326 *  stmmac_release - close entry point of the driver
3327 *  @dev : device pointer.
3328 *  Description:
3329 *  This is the stop entry point of the driver.
3330 */
3331static int stmmac_release(struct net_device *dev)
3332{
3333        struct stmmac_priv *priv = netdev_priv(dev);
3334        u32 chan;
3335
3336        if (device_may_wakeup(priv->device))
3337                phylink_speed_down(priv->phylink, false);
3338        /* Stop and disconnect the PHY */
3339        phylink_stop(priv->phylink);
3340        phylink_disconnect_phy(priv->phylink);
3341
3342        stmmac_disable_all_queues(priv);
3343
3344        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3345                hrtimer_cancel(&priv->tx_queue[chan].txtimer);
3346
3347        /* Free the IRQ lines */
3348        stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
3349
3350        if (priv->eee_enabled) {
3351                priv->tx_path_in_lpi_mode = false;
3352                del_timer_sync(&priv->eee_ctrl_timer);
3353        }
3354
3355        /* Stop TX/RX DMA and clear the descriptors */
3356        stmmac_stop_all_dma(priv);
3357
3358        /* Release and free the Rx/Tx resources */
3359        free_dma_desc_resources(priv);
3360
3361        /* Disable the MAC Rx/Tx */
3362        stmmac_mac_set(priv, priv->ioaddr, false);
3363
3364        netif_carrier_off(dev);
3365
3366        stmmac_release_ptp(priv);
3367
3368        pm_runtime_put(priv->device);
3369
3370        if (priv->dma_cap.fpesel)
3371                stmmac_fpe_stop_wq(priv);
3372
3373        return 0;
3374}
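
/* Editor's note: teardown mirrors stmmac_open() in reverse: PHY stop and
 * disconnect first, then queues/NAPI, TX timers, IRQs, DMA, descriptor
 * memory, the MAC itself, and finally PTP and the FPE workqueue.
 */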
3375
3376static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3377                               struct stmmac_tx_queue *tx_q)
3378{
3379        u16 tag = 0x0, inner_tag = 0x0;
3380        u32 inner_type = 0x0;
3381        struct dma_desc *p;
3382
3383        if (!priv->dma_cap.vlins)
3384                return false;
3385        if (!skb_vlan_tag_present(skb))
3386                return false;
3387        if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3388                inner_tag = skb_vlan_tag_get(skb);
3389                inner_type = STMMAC_VLAN_INSERT;
3390        }
3391
3392        tag = skb_vlan_tag_get(skb);
3393
3394        if (tx_q->tbs & STMMAC_TBS_AVAIL)
3395                p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3396        else
3397                p = &tx_q->dma_tx[tx_q->cur_tx];
3398
3399        if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3400                return false;
3401
3402        stmmac_set_tx_owner(priv, p);
3403        tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3404        return true;
3405}
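
/* Editor's note: stmmac_vlan_insert() consumes one TX descriptor of its
 * own (a context descriptor carrying the tag), which is why the callers
 * read tx_q->cur_tx for the first data descriptor only after this call
 * returns true.
 */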
3406
3407/**
3408 *  stmmac_tso_allocator - allocate and fill TX descriptors for TSO
3409 *  @priv: driver private structure
3410 *  @des: buffer start address
3411 *  @total_len: total length to fill in descriptors
3412 *  @last_segment: condition for the last descriptor
3413 *  @queue: TX queue index
3414 *  Description:
3415 *  This function fills the descriptors and requests new ones according to
3416 *  the remaining buffer length to fill.
3417 */
3418static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
3419                                 int total_len, bool last_segment, u32 queue)
3420{
3421        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3422        struct dma_desc *desc;
3423        u32 buff_size;
3424        int tmp_len;
3425
3426        tmp_len = total_len;
3427
3428        while (tmp_len > 0) {
3429                dma_addr_t curr_addr;
3430
3431                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3432                                                priv->dma_tx_size);
3433                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3434
3435                if (tx_q->tbs & STMMAC_TBS_AVAIL)
3436                        desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3437                else
3438                        desc = &tx_q->dma_tx[tx_q->cur_tx];
3439
3440                curr_addr = des + (total_len - tmp_len);
3441                if (priv->dma_cap.addr64 <= 32)
3442                        desc->des0 = cpu_to_le32(curr_addr);
3443                else
3444                        stmmac_set_desc_addr(priv, desc, curr_addr);
3445
3446                buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3447                            TSO_MAX_BUFF_SIZE : tmp_len;
3448
3449                stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3450                                0, 1,
3451                                (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3452                                0, 0);
3453
3454                tmp_len -= TSO_MAX_BUFF_SIZE;
3455        }
3456}
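
/* Editor's note: a worked example of the split done above. With
 * TSO_MAX_BUFF_SIZE = SZ_16K - 1 = 16383, a hypothetical 40000-byte
 * payload needs ceil(40000 / 16383) = 3 descriptors carrying
 * 16383 + 16383 + 7234 bytes; only the final one is flagged as the last
 * segment (when last_segment is true).
 */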
3457
3458static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3459{
3460        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3461        int desc_size;
3462
3463        if (likely(priv->extend_desc))
3464                desc_size = sizeof(struct dma_extended_desc);
3465        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3466                desc_size = sizeof(struct dma_edesc);
3467        else
3468                desc_size = sizeof(struct dma_desc);
3469
3470        /* The own bit must be the last setting done when preparing the
3471         * descriptor, and a barrier is then needed to make sure that
3472         * all is coherent before granting the DMA engine.
3473         */
3474        wmb();
3475
3476        tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3477        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3478}
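
/* Editor's note: the wmb() above orders the descriptor writes (OWN bit
 * last) against the tail-pointer write that follows; it is the tail
 * pointer update that tells the DMA engine new descriptors are ready.
 */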
3479
3480/**
3481 *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3482 *  @skb : the socket buffer
3483 *  @dev : device pointer
3484 *  Description: this is the transmit function that is called on TSO frames
3485 *  (support available on GMAC4 and newer chips).
3486 *  Diagram below show the ring programming in case of TSO frames:
3487 *
3488 *  First Descriptor
3489 *   --------
3490 *   | DES0 |---> buffer1 = L2/L3/L4 header
3491 *   | DES1 |---> TCP Payload (can continue on next descr...)
3492 *   | DES2 |---> buffer 1 and 2 len
3493 *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3494 *   --------
3495 *      |
3496 *     ...
3497 *      |
3498 *   --------
3499 *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
3500 *   | DES1 | --|
3501 *   | DES2 | --> buffer 1 and 2 len
3502 *   | DES3 |
3503 *   --------
3504 *
3505 * MSS is fixed while TSO is enabled, so the TDES3 context field is only
     * programmed when the MSS changes.
3506 */
3507static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3508{
3509        struct dma_desc *desc, *first, *mss_desc = NULL;
3510        struct stmmac_priv *priv = netdev_priv(dev);
3511        int nfrags = skb_shinfo(skb)->nr_frags;
3512        u32 queue = skb_get_queue_mapping(skb);
3513        unsigned int first_entry, tx_packets;
3514        int tmp_pay_len = 0, first_tx;
3515        struct stmmac_tx_queue *tx_q;
3516        bool has_vlan, set_ic;
3517        u8 proto_hdr_len, hdr;
3518        u32 pay_len, mss;
3519        dma_addr_t des;
3520        int i;
3521
3522        tx_q = &priv->tx_queue[queue];
3523        first_tx = tx_q->cur_tx;
3524
3525        /* Compute header lengths */
3526        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3527                proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
3528                hdr = sizeof(struct udphdr);
3529        } else {
3530                proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3531                hdr = tcp_hdrlen(skb);
3532        }
3533
3534        /* Descriptor availability based on the threshold should be safe enough */
3535        if (unlikely(stmmac_tx_avail(priv, queue) <
3536                (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
3537                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3538                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3539                                                                queue));
3540                        /* This is a hard error, log it. */
3541                        netdev_err(priv->dev,
3542                                   "%s: Tx Ring full when queue awake\n",
3543                                   __func__);
3544                }
3545                return NETDEV_TX_BUSY;
3546        }
3547
3548        pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
3549
3550        mss = skb_shinfo(skb)->gso_size;
3551
3552        /* set new MSS value if needed */
3553        if (mss != tx_q->mss) {
3554                if (tx_q->tbs & STMMAC_TBS_AVAIL)
3555                        mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3556                else
3557                        mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
3558
3559                stmmac_set_mss(priv, mss_desc, mss);
3560                tx_q->mss = mss;
3561                tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3562                                                priv->dma_tx_size);
3563                WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
3564        }
3565
3566        if (netif_msg_tx_queued(priv)) {
3567                pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
3568                        __func__, hdr, proto_hdr_len, pay_len, mss);
3569                pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
3570                        skb->data_len);
3571        }
3572
3573        /* Check if VLAN can be inserted by HW */
3574        has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3575
3576        first_entry = tx_q->cur_tx;
3577        WARN_ON(tx_q->tx_skbuff[first_entry]);
3578
3579        if (tx_q->tbs & STMMAC_TBS_AVAIL)
3580                desc = &tx_q->dma_entx[first_entry].basic;
3581        else
3582                desc = &tx_q->dma_tx[first_entry];
3583        first = desc;
3584
3585        if (has_vlan)
3586                stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3587
3588        /* first descriptor: fill Headers on Buf1 */
3589        des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
3590                             DMA_TO_DEVICE);
3591        if (dma_mapping_error(priv->device, des))
3592                goto dma_map_err;
3593
3594        tx_q->tx_skbuff_dma[first_entry].buf = des;
3595        tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
3596
3597        if (priv->dma_cap.addr64 <= 32) {
3598                first->des0 = cpu_to_le32(des);
3599
3600                /* Fill start of payload in buff2 of first descriptor */
3601                if (pay_len)
3602                        first->des1 = cpu_to_le32(des + proto_hdr_len);
3603
3604                /* If needed take extra descriptors to fill the remaining payload */
3605                tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
3606        } else {
3607                stmmac_set_desc_addr(priv, first, des);
3608                tmp_pay_len = pay_len;
3609                des += proto_hdr_len;
3610                pay_len = 0;
3611        }
3612
3613        stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
3614
3615        /* Prepare fragments */
3616        for (i = 0; i < nfrags; i++) {
3617                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3618
3619                des = skb_frag_dma_map(priv->device, frag, 0,
3620                                       skb_frag_size(frag),
3621                                       DMA_TO_DEVICE);
3622                if (dma_mapping_error(priv->device, des))
3623                        goto dma_map_err;
3624
3625                stmmac_tso_allocator(priv, des, skb_frag_size(frag),
3626                                     (i == nfrags - 1), queue);
3627
3628                tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
3629                tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
3630                tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
3631        }
3632
3633        tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
3634
3635        /* Only the last descriptor gets to point to the skb. */
3636        tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3637
3638        /* Manage tx mitigation */
3639        tx_packets = (tx_q->cur_tx + 1) - first_tx;
3640        tx_q->tx_count_frames += tx_packets;
3641
3642        if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3643                set_ic = true;
3644        else if (!priv->tx_coal_frames[queue])
3645                set_ic = false;
3646        else if (tx_packets > priv->tx_coal_frames[queue])
3647                set_ic = true;
3648        else if ((tx_q->tx_count_frames %
3649                  priv->tx_coal_frames[queue]) < tx_packets)
3650                set_ic = true;
3651        else
3652                set_ic = false;
3653
3654        if (set_ic) {
3655                if (tx_q->tbs & STMMAC_TBS_AVAIL)
3656                        desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3657                else
3658                        desc = &tx_q->dma_tx[tx_q->cur_tx];
3659
3660                tx_q->tx_count_frames = 0;
3661                stmmac_set_tx_ic(priv, desc);
3662                priv->xstats.tx_set_ic_bit++;
3663        }
3664
3665        /* We've used all descriptors we need for this skb, however,
3666         * advance cur_tx so that it references a fresh descriptor.
3667         * ndo_start_xmit will fill this descriptor the next time it's
3668         * called and stmmac_tx_clean may clean up to this descriptor.
3669         */
3670        tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
3671
3672        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3673                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3674                          __func__);
3675                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3676        }
3677
3678        dev->stats.tx_bytes += skb->len;
3679        priv->xstats.tx_tso_frames++;
3680        priv->xstats.tx_tso_nfrags += nfrags;
3681
3682        if (priv->sarc_type)
3683                stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3684
3685        skb_tx_timestamp(skb);
3686
3687        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3688                     priv->hwts_tx_en)) {
3689                /* declare that device is doing timestamping */
3690                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3691                stmmac_enable_tx_timestamp(priv, first);
3692        }
3693
3694        /* Complete the first descriptor before granting the DMA */
3695        stmmac_prepare_tso_tx_desc(priv, first, 1,
3696                        proto_hdr_len,
3697                        pay_len,
3698                        1, tx_q->tx_skbuff_dma[first_entry].last_segment,
3699                        hdr / 4, (skb->len - proto_hdr_len));
3700
3701        /* If context desc is used to change MSS */
3702        if (mss_desc) {
3703                /* Make sure that the first descriptor has been completely
3704                 * written, including its own bit, because the MSS context
3705                 * descriptor actually precedes the first descriptor, so its
3706                 * own bit must be the last thing written.
3707                 */
3708                dma_wmb();
3709                stmmac_set_tx_owner(priv, mss_desc);
3710        }
3711
3712        if (netif_msg_pktdata(priv)) {
3713                pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
3714                        __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3715                        tx_q->cur_tx, first, nfrags);
3716                pr_info(">>> frame to be transmitted: ");
3717                print_pkt(skb->data, skb_headlen(skb));
3718        }
3719
3720        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3721
3722        stmmac_flush_tx_descriptors(priv, queue);
3723        stmmac_tx_timer_arm(priv, queue);
3724
3725        return NETDEV_TX_OK;
3726
3727dma_map_err:
3728        dev_err(priv->device, "Tx dma map failed\n");
3729        dev_kfree_skb(skb);
3730        priv->dev->stats.tx_dropped++;
3731        return NETDEV_TX_OK;
3732}
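
/* Editor's note: a worked example of the availability check at the top of
 * stmmac_tso_xmit(). For a hypothetical 64KB GSO skb with a 54-byte
 * header, (65536 - 54) / 16383 + 1 = 4, so at least four free descriptors
 * are required (plus one more when a new MSS context descriptor is set).
 */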
3733
3734/**
3735 *  stmmac_xmit - Tx entry point of the driver
3736 *  @skb : the socket buffer
3737 *  @dev : device pointer
3738 *  Description : this is the tx entry point of the driver.
3739 *  It programs the chain or the ring and supports oversized frames
3740 *  and SG feature.
3741 */
3742static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3743{
3744        unsigned int first_entry, tx_packets, enh_desc;
3745        struct stmmac_priv *priv = netdev_priv(dev);
3746        unsigned int nopaged_len = skb_headlen(skb);
3747        int i, csum_insertion = 0, is_jumbo = 0;
3748        u32 queue = skb_get_queue_mapping(skb);
3749        int nfrags = skb_shinfo(skb)->nr_frags;
3750        int gso = skb_shinfo(skb)->gso_type;
3751        struct dma_edesc *tbs_desc = NULL;
3752        struct dma_desc *desc, *first;
3753        struct stmmac_tx_queue *tx_q;
3754        bool has_vlan, set_ic;
3755        int entry, first_tx;
3756        dma_addr_t des;
3757
3758        tx_q = &priv->tx_queue[queue];
3759        first_tx = tx_q->cur_tx;
3760
3761        if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
3762                stmmac_disable_eee_mode(priv);
3763
3764        /* Manage oversized TCP frames for GMAC4 device */
3765        if (skb_is_gso(skb) && priv->tso) {
3766                if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3767                        return stmmac_tso_xmit(skb, dev);
3768                if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
3769                        return stmmac_tso_xmit(skb, dev);
3770        }
3771
3772        if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3773                if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3774                        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3775                                                                queue));
3776                        /* This is a hard error, log it. */
3777                        netdev_err(priv->dev,
3778                                   "%s: Tx Ring full when queue awake\n",
3779                                   __func__);
3780                }
3781                return NETDEV_TX_BUSY;
3782        }
3783
3784        /* Check if VLAN can be inserted by HW */
3785        has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
3786
3787        entry = tx_q->cur_tx;
3788        first_entry = entry;
3789        WARN_ON(tx_q->tx_skbuff[first_entry]);
3790
3791        csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3792
3793        if (likely(priv->extend_desc))
3794                desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3795        else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3796                desc = &tx_q->dma_entx[entry].basic;
3797        else
3798                desc = tx_q->dma_tx + entry;
3799
3800        first = desc;
3801
3802        if (has_vlan)
3803                stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
3804
3805        enh_desc = priv->plat->enh_desc;
3806        /* Program the descriptors according to the size of the frame */
3807        if (enh_desc)
3808                is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
3809
3810        if (unlikely(is_jumbo)) {
3811                entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
3812                if (unlikely(entry < 0) && (entry != -EINVAL))
3813                        goto dma_map_err;
3814        }
3815
3816        for (i = 0; i < nfrags; i++) {
3817                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3818                int len = skb_frag_size(frag);
3819                bool last_segment = (i == (nfrags - 1));
3820
3821                entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3822                WARN_ON(tx_q->tx_skbuff[entry]);
3823
3824                if (likely(priv->extend_desc))
3825                        desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3826                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3827                        desc = &tx_q->dma_entx[entry].basic;
3828                else
3829                        desc = tx_q->dma_tx + entry;
3830
3831                des = skb_frag_dma_map(priv->device, frag, 0, len,
3832                                       DMA_TO_DEVICE);
3833                if (dma_mapping_error(priv->device, des))
3834                        goto dma_map_err; /* should reuse desc w/o issues */
3835
3836                tx_q->tx_skbuff_dma[entry].buf = des;
3837
3838                stmmac_set_desc_addr(priv, desc, des);
3839
3840                tx_q->tx_skbuff_dma[entry].map_as_page = true;
3841                tx_q->tx_skbuff_dma[entry].len = len;
3842                tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3843
3844                /* Prepare the descriptor and set the own bit too */
3845                stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3846                                priv->mode, 1, last_segment, skb->len);
3847        }
3848
3849        /* Only the last descriptor gets to point to the skb. */
3850        tx_q->tx_skbuff[entry] = skb;
3851
3852        /* According to the coalesce parameter, the IC bit for the latest
3853         * segment is reset and the timer is re-started to clean the tx status.
3854         * This approach takes care of the fragments: desc is the first
3855         * element in case of no SG.
3856         */
3857        tx_packets = (entry + 1) - first_tx;
3858        tx_q->tx_count_frames += tx_packets;
3859
3860        if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
3861                set_ic = true;
3862        else if (!priv->tx_coal_frames[queue])
3863                set_ic = false;
3864        else if (tx_packets > priv->tx_coal_frames[queue])
3865                set_ic = true;
3866        else if ((tx_q->tx_count_frames %
3867                  priv->tx_coal_frames[queue]) < tx_packets)
3868                set_ic = true;
3869        else
3870                set_ic = false;
3871
3872        if (set_ic) {
3873                if (likely(priv->extend_desc))
3874                        desc = &tx_q->dma_etx[entry].basic;
3875                else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3876                        desc = &tx_q->dma_entx[entry].basic;
3877                else
3878                        desc = &tx_q->dma_tx[entry];
3879
3880                tx_q->tx_count_frames = 0;
3881                stmmac_set_tx_ic(priv, desc);
3882                priv->xstats.tx_set_ic_bit++;
3883        }
3884
3885        /* We've used all descriptors we need for this skb, however,
3886         * advance cur_tx so that it references a fresh descriptor.
3887         * ndo_start_xmit will fill this descriptor the next time it's
3888         * called and stmmac_tx_clean may clean up to this descriptor.
3889         */
3890        entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
3891        tx_q->cur_tx = entry;
3892
3893        if (netif_msg_pktdata(priv)) {
3894                netdev_dbg(priv->dev,
3895                           "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3896                           __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3897                           entry, first, nfrags);
3898
3899                netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3900                print_pkt(skb->data, skb->len);
3901        }
3902
3903        if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3904                netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3905                          __func__);
3906                netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3907        }
3908
3909        dev->stats.tx_bytes += skb->len;
3910
3911        if (priv->sarc_type)
3912                stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3913
3914        skb_tx_timestamp(skb);
3915
3916        /* Ready to fill the first descriptor and set the OWN bit without any
3917         * problems because all the descriptors are actually ready to be
3918         * passed to the DMA engine.
3919         */
3920        if (likely(!is_jumbo)) {
3921                bool last_segment = (nfrags == 0);
3922
3923                des = dma_map_single(priv->device, skb->data,
3924                                     nopaged_len, DMA_TO_DEVICE);
3925                if (dma_mapping_error(priv->device, des))
3926                        goto dma_map_err;
3927
3928                tx_q->tx_skbuff_dma[first_entry].buf = des;
3929
3930                stmmac_set_desc_addr(priv, first, des);
3931
3932                tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3933                tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3934
3935                if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3936                             priv->hwts_tx_en)) {
3937                        /* declare that device is doing timestamping */
3938                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3939                        stmmac_enable_tx_timestamp(priv, first);
3940                }
3941
3942                /* Prepare the first descriptor setting the OWN bit too */
3943                stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3944                                csum_insertion, priv->mode, 0, last_segment,
3945                                skb->len);
3946        }
3947
3948        if (tx_q->tbs & STMMAC_TBS_EN) {
3949                struct timespec64 ts = ns_to_timespec64(skb->tstamp);
3950
3951                tbs_desc = &tx_q->dma_entx[first_entry];
3952                stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
3953        }
3954
3955        stmmac_set_tx_owner(priv, first);
3956
3957        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3958
3959        stmmac_enable_dma_transmission(priv, priv->ioaddr);
3960
3961        stmmac_flush_tx_descriptors(priv, queue);
3962        stmmac_tx_timer_arm(priv, queue);
3963
3964        return NETDEV_TX_OK;
3965
3966dma_map_err:
3967        netdev_err(priv->dev, "Tx DMA map failed\n");
3968        dev_kfree_skb(skb);
3969        priv->dev->stats.tx_dropped++;
3970        return NETDEV_TX_OK;
3971}
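
/* Editor's note: a rough worked example of the mitigation logic above,
 * assuming a hypothetical tx_coal_frames[queue] of 25: the IC bit is set
 * only when the running tx_count_frames crosses a multiple of 25 (or for
 * timestamped frames), so roughly one TX completion interrupt is raised
 * per 25 queued frames, with the tx timer covering the rest.
 */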
3972
3973static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3974{
3975        struct vlan_ethhdr *veth;
3976        __be16 vlan_proto;
3977        u16 vlanid;
3978
3979        veth = (struct vlan_ethhdr *)skb->data;
3980        vlan_proto = veth->h_vlan_proto;
3981
3982        if ((vlan_proto == htons(ETH_P_8021Q) &&
3983             dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3984            (vlan_proto == htons(ETH_P_8021AD) &&
3985             dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
3986                /* pop the vlan tag */
3987                vlanid = ntohs(veth->h_vlan_TCI);
3988                memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
3989                skb_pull(skb, VLAN_HLEN);
3990                __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
3991        }
3992}
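
/* Editor's note: byte layout of the tag pop above. Before:
 * |DA(6)|SA(6)|0x8100(2)|TCI(2)|type(2)|payload|. The memmove() shifts the
 * 12 DA/SA bytes up by VLAN_HLEN (4) and skb_pull() drops 4 bytes, leaving
 * |DA|SA|type|payload| with the TCI re-injected via
 * __vlan_hwaccel_put_tag().
 */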
3993
3994/**
3995 * stmmac_rx_refill - refill the used preallocated RX buffers
3996 * @priv: driver private structure
3997 * @queue: RX queue index
3998 * Description : this reallocates the RX buffers for the zero-copy based
3999 * reception process.
4000 */
4001static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4002{
4003        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4004        int len, dirty = stmmac_rx_dirty(priv, queue);
4005        unsigned int entry = rx_q->dirty_rx;
4006        gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4007
4008        if (priv->dma_cap.addr64 <= 32)
4009                gfp |= GFP_DMA32;
4010
4011        len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
4012
4013        while (dirty-- > 0) {
4014                struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4015                struct dma_desc *p;
4016                bool use_rx_wd;
4017
4018                if (priv->extend_desc)
4019                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
4020                else
4021                        p = rx_q->dma_rx + entry;
4022
4023                if (!buf->page) {
4024                        buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4025                        if (!buf->page)
4026                                break;
4027                }
4028
4029                if (priv->sph && !buf->sec_page) {
4030                        buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4031                        if (!buf->sec_page)
4032                                break;
4033
4034                        buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4035
4036                        dma_sync_single_for_device(priv->device, buf->sec_addr,
4037                                                   len, DMA_FROM_DEVICE);
4038                }
4039
4040                buf->addr = page_pool_get_dma_addr(buf->page);
4041
4042                /* Sync whole allocation to device. This will invalidate old
4043                 * data.
4044                 */
4045                dma_sync_single_for_device(priv->device, buf->addr, len,
4046                                           DMA_FROM_DEVICE);
4047
4048                stmmac_set_desc_addr(priv, p, buf->addr);
4049                if (priv->sph)
4050                        stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4051                else
4052                        stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4053                stmmac_refill_desc3(priv, rx_q, p);
4054
4055                rx_q->rx_count_frames++;
4056                rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4057                if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4058                        rx_q->rx_count_frames = 0;
4059
4060                use_rx_wd = !priv->rx_coal_frames[queue];
4061                use_rx_wd |= rx_q->rx_count_frames > 0;
4062                if (!priv->use_riwt)
4063                        use_rx_wd = false;
4064
4065                dma_wmb();
4066                stmmac_set_rx_owner(priv, p, use_rx_wd);
4067
4068                entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4069        }
4070        rx_q->dirty_rx = entry;
4071        rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4072                            (rx_q->dirty_rx * sizeof(struct dma_desc));
4073        stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4074}
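
/* Editor's note: in the refill loop above, a true use_rx_wd re-arms the
 * descriptor without interrupt-on-completion, so when the RIWT watchdog
 * is in use most RX completions are signalled by the watchdog timer
 * rather than by per-descriptor interrupts.
 */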
4075
4076static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4077                                       struct dma_desc *p,
4078                                       int status, unsigned int len)
4079{
4080        unsigned int plen = 0, hlen = 0;
4081        int coe = priv->hw->rx_csum;
4082
4083        /* Not first descriptor, buffer is always zero */
4084        if (priv->sph && len)
4085                return 0;
4086
4087        /* First descriptor, get split header length */
4088        stmmac_get_rx_header_len(priv, p, &hlen);
4089        if (priv->sph && hlen) {
4090                priv->xstats.rx_split_hdr_pkt_n++;
4091                return hlen;
4092        }
4093
4094        /* First descriptor, not last descriptor and not split header */
4095        if (status & rx_not_ls)
4096                return priv->dma_buf_sz;
4097
4098        plen = stmmac_get_rx_frame_len(priv, p, coe);
4099
4100        /* First descriptor and last descriptor and not split header */
4101        return min_t(unsigned int, priv->dma_buf_sz, plen);
4102}
4103
4104static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4105                                       struct dma_desc *p,
4106                                       int status, unsigned int len)
4107{
4108        int coe = priv->hw->rx_csum;
4109        unsigned int plen = 0;
4110
4111        /* Not split header, buffer is not available */
4112        if (!priv->sph)
4113                return 0;
4114
4115        /* Not last descriptor */
4116        if (status & rx_not_ls)
4117                return priv->dma_buf_sz;
4118
4119        plen = stmmac_get_rx_frame_len(priv, p, coe);
4120
4121        /* Last descriptor */
4122        return plen - len;
4123}
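
/* Editor's note: a worked example of the two helpers above with split
 * header enabled. For a hypothetical 1000-byte frame whose first
 * descriptor reports hlen = 14, buf1 carries the 14 header bytes and, on
 * the last descriptor, buf2 carries plen - len = 1000 - 14 = 986 payload
 * bytes (before the FCS adjustment done by the caller).
 */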
4124
4125/**
4126 * stmmac_rx - manage the receive process
4127 * @priv: driver private structure
4128 * @limit: NAPI budget
4129 * @queue: RX queue index.
4130 * Description : this is the function called by the NAPI poll method.
4131 * It gets all the frames inside the ring.
4132 */
4133static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
4134{
4135        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4136        struct stmmac_channel *ch = &priv->channel[queue];
4137        unsigned int count = 0, error = 0, len = 0;
4138        int status = 0, coe = priv->hw->rx_csum;
4139        unsigned int next_entry = rx_q->cur_rx;
4140        unsigned int desc_size;
4141        struct sk_buff *skb = NULL;
4142
4143        if (netif_msg_rx_status(priv)) {
4144                void *rx_head;
4145
4146                netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4147                if (priv->extend_desc) {
4148                        rx_head = (void *)rx_q->dma_erx;
4149                        desc_size = sizeof(struct dma_extended_desc);
4150                } else {
4151                        rx_head = (void *)rx_q->dma_rx;
4152                        desc_size = sizeof(struct dma_desc);
4153                }
4154
4155                stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4156                                    rx_q->dma_rx_phy, desc_size);
4157        }
4158        while (count < limit) {
4159                unsigned int buf1_len = 0, buf2_len = 0;
4160                enum pkt_hash_types hash_type;
4161                struct stmmac_rx_buffer *buf;
4162                struct dma_desc *np, *p;
4163                int entry;
4164                u32 hash;
4165
4166                if (!count && rx_q->state_saved) {
4167                        skb = rx_q->state.skb;
4168                        error = rx_q->state.error;
4169                        len = rx_q->state.len;
4170                } else {
4171                        rx_q->state_saved = false;
4172                        skb = NULL;
4173                        error = 0;
4174                        len = 0;
4175                }
4176
4177                if (count >= limit)
4178                        break;
4179
4180read_again:
4181                buf1_len = 0;
4182                buf2_len = 0;
4183                entry = next_entry;
4184                buf = &rx_q->buf_pool[entry];
4185
4186                if (priv->extend_desc)
4187                        p = (struct dma_desc *)(rx_q->dma_erx + entry);
4188                else
4189                        p = rx_q->dma_rx + entry;
4190
4191                /* read the status of the incoming frame */
4192                status = stmmac_rx_status(priv, &priv->dev->stats,
4193                                &priv->xstats, p);
4194                /* stop if the descriptor is still owned by the DMA; otherwise go ahead */
4195                if (unlikely(status & dma_own))
4196                        break;
4197
4198                rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4199                                                priv->dma_rx_size);
4200                next_entry = rx_q->cur_rx;
4201
4202                if (priv->extend_desc)
4203                        np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4204                else
4205                        np = rx_q->dma_rx + next_entry;
4206
4207                prefetch(np);
4208
4209                if (priv->extend_desc)
4210                        stmmac_rx_extended_status(priv, &priv->dev->stats,
4211                                        &priv->xstats, rx_q->dma_erx + entry);
4212                if (unlikely(status == discard_frame)) {
4213                        page_pool_recycle_direct(rx_q->page_pool, buf->page);
4214                        buf->page = NULL;
4215                        error = 1;
4216                        if (!priv->hwts_rx_en)
4217                                priv->dev->stats.rx_errors++;
4218                }
4219
4220                if (unlikely(error && (status & rx_not_ls)))
4221                        goto read_again;
4222                if (unlikely(error)) {
4223                        dev_kfree_skb(skb);
4224                        skb = NULL;
4225                        count++;
4226                        continue;
4227                }
4228
4229                /* Buffer is good. Go on. */
4230
4231                prefetch(page_address(buf->page));
4232                if (buf->sec_page)
4233                        prefetch(page_address(buf->sec_page));
4234
4235                buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
4236                len += buf1_len;
4237                buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
4238                len += buf2_len;
4239
4240                /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
4241                 * Type frames (LLC/LLC-SNAP)
4242                 *
4243                 * llc_snap is never checked in GMAC >= 4, so this ACS
4244                 * feature is always disabled and packets need to be
4245                 * stripped manually.
4246                 */
4247                if (likely(!(status & rx_not_ls)) &&
4248                    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
4249                     unlikely(status != llc_snap))) {
4250                        if (buf2_len)
4251                                buf2_len -= ETH_FCS_LEN;
4252                        else
4253                                buf1_len -= ETH_FCS_LEN;
4254
4255                        len -= ETH_FCS_LEN;
4256                }
4257
4258                if (!skb) {
4259                        skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
4260                        if (!skb) {
4261                                priv->dev->stats.rx_dropped++;
4262                                count++;
4263                                goto drain_data;
4264                        }
4265
4266                        dma_sync_single_for_cpu(priv->device, buf->addr,
4267                                                buf1_len, DMA_FROM_DEVICE);
4268                        skb_copy_to_linear_data(skb, page_address(buf->page),
4269                                                buf1_len);
4270                        skb_put(skb, buf1_len);
4271
4272                        /* Data payload copied into SKB, page ready for recycle */
4273                        page_pool_recycle_direct(rx_q->page_pool, buf->page);
4274                        buf->page = NULL;
4275                } else if (buf1_len) {
4276                        dma_sync_single_for_cpu(priv->device, buf->addr,
4277                                                buf1_len, DMA_FROM_DEVICE);
4278                        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
4279                                        buf->page, 0, buf1_len,
4280                                        priv->dma_buf_sz);
4281
4282                        /* Data payload appended into SKB */
4283                        page_pool_release_page(rx_q->page_pool, buf->page);
4284                        buf->page = NULL;
4285                }
4286
4287                if (buf2_len) {
4288                        dma_sync_single_for_cpu(priv->device, buf->sec_addr,
4289                                                buf2_len, DMA_FROM_DEVICE);
4290                        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
4291                                        buf->sec_page, 0, buf2_len,
4292                                        priv->dma_buf_sz);
4293
4294                        /* Data payload appended into SKB */
4295                        page_pool_release_page(rx_q->page_pool, buf->sec_page);
4296                        buf->sec_page = NULL;
4297                }
4298
4299drain_data:
4300                if (likely(status & rx_not_ls))
4301                        goto read_again;
4302                if (!skb)
4303                        continue;
4304
4305                /* Got entire packet into SKB. Finish it. */
4306
4307                stmmac_get_rx_hwtstamp(priv, p, np, skb);
4308                stmmac_rx_vlan(priv->dev, skb);
4309                skb->protocol = eth_type_trans(skb, priv->dev);
4310
4311                if (unlikely(!coe))
4312                        skb_checksum_none_assert(skb);
4313                else
4314                        skb->ip_summed = CHECKSUM_UNNECESSARY;
4315
4316                if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4317                        skb_set_hash(skb, hash, hash_type);
4318
4319                skb_record_rx_queue(skb, queue);
4320                napi_gro_receive(&ch->rx_napi, skb);
4321                skb = NULL;
4322
4323                priv->dev->stats.rx_packets++;
4324                priv->dev->stats.rx_bytes += len;
4325                count++;
4326        }
4327
4328        if (status & rx_not_ls || skb) {
4329                rx_q->state_saved = true;
4330                rx_q->state.skb = skb;
4331                rx_q->state.error = error;
4332                rx_q->state.len = len;
4333        }
4334
4335        stmmac_rx_refill(priv, queue);
4336
4337        priv->xstats.rx_pkt_n += count;
4338
4339        return count;
4340}
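
/*
 * Editor's note: the receive walk above advances cur_rx with
 * STMMAC_GET_ENTRY(), which wraps the ring index at the ring size, and
 * it parks a partially assembled multi-descriptor frame in rx_q->state
 * so the next NAPI poll can resume it once the budget runs out.  A
 * minimal sketch of the wraparound arithmetic, assuming a power-of-two
 * ring size (the driver's real macro is defined elsewhere):
 */
static inline unsigned int example_next_rx_entry(unsigned int cur,
                                                 unsigned int ring_size)
{
        /* e.g. cur = 511, ring_size = 512 -> wraps back to entry 0 */
        return (cur + 1) & (ring_size - 1);
}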
4341
4342static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
4343{
4344        struct stmmac_channel *ch =
4345                container_of(napi, struct stmmac_channel, rx_napi);
4346        struct stmmac_priv *priv = ch->priv_data;
4347        u32 chan = ch->index;
4348        int work_done;
4349
4350        priv->xstats.napi_poll++;
4351
4352        work_done = stmmac_rx(priv, budget, chan);
4353        if (work_done < budget && napi_complete_done(napi, work_done)) {
4354                unsigned long flags;
4355
4356                spin_lock_irqsave(&ch->lock, flags);
4357                stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
4358                spin_unlock_irqrestore(&ch->lock, flags);
4359        }
4360
4361        return work_done;
4362}
4363
4364static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
4365{
4366        struct stmmac_channel *ch =
4367                container_of(napi, struct stmmac_channel, tx_napi);
4368        struct stmmac_priv *priv = ch->priv_data;
4369        u32 chan = ch->index;
4370        int work_done;
4371
4372        priv->xstats.napi_poll++;
4373
4374        work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
4375        work_done = min(work_done, budget);
4376
4377        if (work_done < budget && napi_complete_done(napi, work_done)) {
4378                unsigned long flags;
4379
4380                spin_lock_irqsave(&ch->lock, flags);
4381                stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
4382                spin_unlock_irqrestore(&ch->lock, flags);
4383        }
4384
4385        return work_done;
4386}
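
/*
 * Editor's note: both poll handlers above follow the standard NAPI
 * contract: consume at most `budget` units of work, and only re-arm
 * the per-channel DMA interrupt when less than the full budget was
 * used and napi_complete_done() confirms completion.  The re-arm is
 * done under ch->lock because the interrupt path touches the same
 * mask.  In outline:
 *
 *      work_done = process_ring(..., budget);
 *      if (work_done < budget && napi_complete_done(napi, work_done))
 *              reenable_channel_irq();  // device-specific re-arm
 *      return work_done;
 *
 * Returning the full budget keeps the channel on the poll list with
 * its interrupt still masked.
 */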
4387
4388/**
4389 *  stmmac_tx_timeout
4390 *  @dev : Pointer to net device structure
4391 *  Description: this function is called when a packet transmission fails to
4392 *   complete within a reasonable time. The driver will mark the error in the
4393 *   netdev structure and arrange for the device to be reset to a sane state
4394 *   in order to transmit a new packet.
4395 */
4396static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
4397{
4398        struct stmmac_priv *priv = netdev_priv(dev);
4399
4400        stmmac_global_err(priv);
4401}
4402
4403/**
4404 *  stmmac_set_rx_mode - entry point for multicast addressing
4405 *  @dev : pointer to the device structure
4406 *  Description:
4407 *  This function is a driver entry point which gets called by the kernel
4408 *  whenever multicast addresses must be enabled/disabled.
4409 *  Return value:
4410 *  void.
4411 */
4412static void stmmac_set_rx_mode(struct net_device *dev)
4413{
4414        struct stmmac_priv *priv = netdev_priv(dev);
4415
4416        stmmac_set_filter(priv, priv->hw, dev);
4417}
4418
4419/**
4420 *  stmmac_change_mtu - entry point to change MTU size for the device.
4421 *  @dev : device pointer.
4422 *  @new_mtu : the new MTU size for the device.
4423 *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
4424 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
4425 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
4426 *  Return value:
4427 *  0 on success and an appropriate (-)ve integer as defined in errno.h
4428 *  file on failure.
4429 */
4430static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
4431{
4432        struct stmmac_priv *priv = netdev_priv(dev);
4433        int txfifosz = priv->plat->tx_fifo_size;
4434
4435        if (txfifosz == 0)
4436                txfifosz = priv->dma_cap.tx_fifo_size;
4437
4438        txfifosz /= priv->plat->tx_queues_to_use;
4439
4440        if (netif_running(dev)) {
4441                netdev_err(priv->dev, "must be stopped to change its MTU\n");
4442                return -EBUSY;
4443        }
4444
4445        new_mtu = STMMAC_ALIGN(new_mtu);
4446
4447        /* If condition true, FIFO is too small or MTU too large */
4448        if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
4449                return -EINVAL;
4450
4451        dev->mtu = new_mtu;
4452
4453        netdev_update_features(dev);
4454
4455        return 0;
4456}
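
/*
 * Editor's note: a worked example of the FIFO check above, with
 * illustrative numbers: a 16384-byte TX FIFO shared by four TX queues
 * gives txfifosz = 16384 / 4 = 4096, so an aligned new_mtu larger than
 * 4096 is rejected with -EINVAL even though it is below
 * BUF_SIZE_16KiB.
 */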
4457
4458static netdev_features_t stmmac_fix_features(struct net_device *dev,
4459                                             netdev_features_t features)
4460{
4461        struct stmmac_priv *priv = netdev_priv(dev);
4462
4463        if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
4464                features &= ~NETIF_F_RXCSUM;
4465
4466        if (!priv->plat->tx_coe)
4467                features &= ~NETIF_F_CSUM_MASK;
4468
4469        /* Some GMAC devices have a bugged Jumbo frame support that
4470         * needs to have the Tx COE disabled for oversized frames
4471         * (due to limited buffer sizes). In this case we disable
4472         * the TX csum insertion in the TDES and not use SF.
4473         */
4474        if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
4475                features &= ~NETIF_F_CSUM_MASK;
4476
4477        /* Disable tso if asked by ethtool */
4478        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4479                if (features & NETIF_F_TSO)
4480                        priv->tso = true;
4481                else
4482                        priv->tso = false;
4483        }
4484
4485        return features;
4486}
4487
4488static int stmmac_set_features(struct net_device *netdev,
4489                               netdev_features_t features)
4490{
4491        struct stmmac_priv *priv = netdev_priv(netdev);
4492        bool sph_en;
4493        u32 chan;
4494
4495        /* Keep the COE Type in case csum is supported */
4496        if (features & NETIF_F_RXCSUM)
4497                priv->hw->rx_csum = priv->plat->rx_coe;
4498        else
4499                priv->hw->rx_csum = 0;
4500        /* No check needed because rx_coe has already been set and will be
4501         * fixed up if there is an issue.
4502         */
4503        stmmac_rx_ipc(priv, priv->hw);
4504
4505        sph_en = (priv->hw->rx_csum > 0) && priv->sph;
4506        for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
4507                stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
4508
4509        return 0;
4510}
4511
4512static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
4513{
4514        struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
4515        enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
4516        enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
4517        bool *hs_enable = &fpe_cfg->hs_enable;
4518
4519        if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
4520                return;
4521
4522        /* If LP has sent verify mPacket, LP is FPE capable */
4523        if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
4524                if (*lp_state < FPE_STATE_CAPABLE)
4525                        *lp_state = FPE_STATE_CAPABLE;
4526
4527                /* If the user has requested FPE enable, respond quickly */
4528                if (*hs_enable)
4529                        stmmac_fpe_send_mpacket(priv, priv->ioaddr,
4530                                                MPACKET_RESPONSE);
4531        }
4532
4533        /* If Local has sent verify mPacket, Local is FPE capable */
4534        if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
4535                if (*lo_state < FPE_STATE_CAPABLE)
4536                        *lo_state = FPE_STATE_CAPABLE;
4537        }
4538
4539        /* If LP has sent response mPacket, LP is entering FPE ON */
4540        if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
4541                *lp_state = FPE_STATE_ENTERING_ON;
4542
4543        /* If Local has sent response mPacket, Local is entering FPE ON */
4544        if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
4545                *lo_state = FPE_STATE_ENTERING_ON;
4546
4547        if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
4548            !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
4549            priv->fpe_wq) {
4550                queue_work(priv->fpe_wq, &priv->fpe_task);
4551        }
4552}
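
/*
 * Editor's note: a compact view of the verify/response handshake that
 * stmmac_fpe_event_status() records and stmmac_fpe_lp_task() (below)
 * completes, tracked independently for the local side (lo) and the
 * link partner (lp):
 *
 *      OFF --- verify mPacket sent/seen ---> CAPABLE
 *      CAPABLE --- response mPacket sent/seen ---> ENTERING_ON
 *      both ENTERING_ON ---> worker programs the hardware ---> ON
 *
 * The worker is queued only while the handshake is enabled and the
 * FPE workqueue has not been torn down.
 */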
4553
4554static void stmmac_common_interrupt(struct stmmac_priv *priv)
4555{
4556        u32 rx_cnt = priv->plat->rx_queues_to_use;
4557        u32 tx_cnt = priv->plat->tx_queues_to_use;
4558        u32 queues_count;
4559        u32 queue;
4560        bool xmac;
4561
4562        xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
4563        queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
4564
4565        if (priv->irq_wake)
4566                pm_wakeup_event(priv->device, 0);
4567
4568        if (priv->dma_cap.estsel)
4569                stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
4570                                      &priv->xstats, tx_cnt);
4571
4572        if (priv->dma_cap.fpesel) {
4573                int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
4574                                                   priv->dev);
4575
4576                stmmac_fpe_event_status(priv, status);
4577        }
4578
4579        /* To handle the GMAC's own interrupts */
4580        if ((priv->plat->has_gmac) || xmac) {
4581                int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
4582
4583                if (unlikely(status)) {
4584                        /* For LPI we need to save the tx status */
4585                        if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
4586                                priv->tx_path_in_lpi_mode = true;
4587                        if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
4588                                priv->tx_path_in_lpi_mode = false;
4589                }
4590
4591                for (queue = 0; queue < queues_count; queue++) {
4592                        status = stmmac_host_mtl_irq_status(priv, priv->hw,
4593                                                            queue);
4594                }
4595
4596                /* PCS link status */
4597                if (priv->hw->pcs) {
4598                        if (priv->xstats.pcs_link)
4599                                netif_carrier_on(priv->dev);
4600                        else
4601                                netif_carrier_off(priv->dev);
4602                }
4603
4604                stmmac_timestamp_interrupt(priv, priv);
4605        }
4606}
4607
4608/**
4609 *  stmmac_interrupt - main ISR
4610 *  @irq: interrupt number.
4611 *  @dev_id: to pass the net device pointer.
4612 *  Description: this is the main driver interrupt service routine.
4613 *  It can call:
4614 *  o DMA service routine (to manage incoming frame reception and transmission
4615 *    status)
4616 *  o Core interrupts to manage: remote wake-up, management counter, LPI
4617 *    interrupts.
4618 */
4619static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
4620{
4621        struct net_device *dev = (struct net_device *)dev_id;
4622        struct stmmac_priv *priv = netdev_priv(dev);
4623
4624        /* Check if adapter is up */
4625        if (test_bit(STMMAC_DOWN, &priv->state))
4626                return IRQ_HANDLED;
4627
4628        /* Check if a fatal error happened */
4629        if (stmmac_safety_feat_interrupt(priv))
4630                return IRQ_HANDLED;
4631
4632        /* To handle Common interrupts */
4633        stmmac_common_interrupt(priv);
4634
4635        /* To handle DMA interrupts */
4636        stmmac_dma_interrupt(priv);
4637
4638        return IRQ_HANDLED;
4639}
4640
4641static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
4642{
4643        struct net_device *dev = (struct net_device *)dev_id;
4644        struct stmmac_priv *priv = netdev_priv(dev);
4645
4646        if (unlikely(!dev)) {
4647                pr_err("%s: invalid dev pointer\n", __func__);
4648                return IRQ_NONE;
4649        }
4650
4651        /* Check if adapter is up */
4652        if (test_bit(STMMAC_DOWN, &priv->state))
4653                return IRQ_HANDLED;
4654
4655        /* To handle Common interrupts */
4656        stmmac_common_interrupt(priv);
4657
4658        return IRQ_HANDLED;
4659}
4660
4661static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
4662{
4663        struct net_device *dev = (struct net_device *)dev_id;
4664        struct stmmac_priv *priv = netdev_priv(dev);
4665
4666        if (unlikely(!dev)) {
4667                pr_err("%s: invalid dev pointer\n", __func__);
4668                return IRQ_NONE;
4669        }
4670
4671        /* Check if adapter is up */
4672        if (test_bit(STMMAC_DOWN, &priv->state))
4673                return IRQ_HANDLED;
4674
4675        /* Check if a fatal error happened */
4676        stmmac_safety_feat_interrupt(priv);
4677
4678        return IRQ_HANDLED;
4679}
4680
4681static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
4682{
4683        struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
4684        struct stmmac_priv *priv;
4685        int chan, status;
4686
4687        if (unlikely(!data)) {
4688                pr_err("%s: invalid queue pointer\n", __func__);
4689                return IRQ_NONE;
4690        }
4691
4692        chan = tx_q->queue_index;
4693        priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
4694
4695        /* Check if adapter is up */
4696        if (test_bit(STMMAC_DOWN, &priv->state))
4697                return IRQ_HANDLED;
4698
4699        status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
4700
4701        if (unlikely(status & tx_hard_error_bump_tc)) {
4702                /* Try to bump up the dma threshold on this failure */
4703                if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
4704                    tc <= 256) {
4705                        tc += 64;
4706                        if (priv->plat->force_thresh_dma_mode)
4707                                stmmac_set_dma_operation_mode(priv,
4708                                                              tc,
4709                                                              tc,
4710                                                              chan);
4711                        else
4712                                stmmac_set_dma_operation_mode(priv,
4713                                                              tc,
4714                                                              SF_DMA_MODE,
4715                                                              chan);
4716                        priv->xstats.threshold = tc;
4717                }
4718        } else if (unlikely(status == tx_hard_error)) {
4719                stmmac_tx_err(priv, chan);
4720        }
4721
4722        return IRQ_HANDLED;
4723}
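
/*
 * Editor's note: the threshold bump above trades latency for fewer TX
 * underflows.  Starting from the module default tc = 64, every
 * tx_hard_error_bump_tc event adds 64 (64 -> 128 -> 192 -> 256 -> 320)
 * for as long as the current value is still <= 256; a plain
 * tx_hard_error instead forces a full channel restart through
 * stmmac_tx_err().
 */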
4724
4725static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
4726{
4727        struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
4728        struct stmmac_priv *priv;
4729        int chan;
4730
4731        if (unlikely(!data)) {
4732                pr_err("%s: invalid queue pointer\n", __func__);
4733                return IRQ_NONE;
4734        }
4735        chan = rx_q->queue_index;
4736        priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
4737
4738        /* Check if adapter is up */
4739        if (test_bit(STMMAC_DOWN, &priv->state))
4740                return IRQ_HANDLED;
4741
4742        stmmac_napi_check(priv, chan, DMA_DIR_RX);
4743
4744        return IRQ_HANDLED;
4745}
4746
4747#ifdef CONFIG_NET_POLL_CONTROLLER
4748/* Polling receive - used by NETCONSOLE and other diagnostic tools
4749 * to allow network I/O with interrupts disabled.
4750 */
4751static void stmmac_poll_controller(struct net_device *dev)
4752{
4753        struct stmmac_priv *priv = netdev_priv(dev);
4754        int i;
4755
4756        /* If adapter is down, do nothing */
4757        if (test_bit(STMMAC_DOWN, &priv->state))
4758                return;
4759
4760        if (priv->plat->multi_msi_en) {
4761                for (i = 0; i < priv->plat->rx_queues_to_use; i++)
4762                        stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
4763
4764                for (i = 0; i < priv->plat->tx_queues_to_use; i++)
4765                        stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
4766        } else {
4767                disable_irq(dev->irq);
4768                stmmac_interrupt(dev->irq, dev);
4769                enable_irq(dev->irq);
4770        }
4771}
4772#endif
4773
4774/**
4775 *  stmmac_ioctl - Entry point for the Ioctl
4776 *  @dev: Device pointer.
4777 *  @rq: An IOCTL-specific structure that can contain a pointer to
4778 *  a proprietary structure used to pass information to the driver.
4779 *  @cmd: IOCTL command
4780 *  Description:
4781 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
4782 */
4783static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4784{
4785        struct stmmac_priv *priv = netdev_priv(dev);
4786        int ret = -EOPNOTSUPP;
4787
4788        if (!netif_running(dev))
4789                return -EINVAL;
4790
4791        switch (cmd) {
4792        case SIOCGMIIPHY:
4793        case SIOCGMIIREG:
4794        case SIOCSMIIREG:
4795                ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
4796                break;
4797        case SIOCSHWTSTAMP:
4798                ret = stmmac_hwtstamp_set(dev, rq);
4799                break;
4800        case SIOCGHWTSTAMP:
4801                ret = stmmac_hwtstamp_get(dev, rq);
4802                break;
4803        default:
4804                break;
4805        }
4806
4807        return ret;
4808}
4809
4810static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4811                                    void *cb_priv)
4812{
4813        struct stmmac_priv *priv = cb_priv;
4814        int ret = -EOPNOTSUPP;
4815
4816        if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
4817                return ret;
4818
4819        stmmac_disable_all_queues(priv);
4820
4821        switch (type) {
4822        case TC_SETUP_CLSU32:
4823                ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
4824                break;
4825        case TC_SETUP_CLSFLOWER:
4826                ret = stmmac_tc_setup_cls(priv, priv, type_data);
4827                break;
4828        default:
4829                break;
4830        }
4831
4832        stmmac_enable_all_queues(priv);
4833        return ret;
4834}
4835
4836static LIST_HEAD(stmmac_block_cb_list);
4837
4838static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
4839                           void *type_data)
4840{
4841        struct stmmac_priv *priv = netdev_priv(ndev);
4842
4843        switch (type) {
4844        case TC_SETUP_BLOCK:
4845                return flow_block_cb_setup_simple(type_data,
4846                                                  &stmmac_block_cb_list,
4847                                                  stmmac_setup_tc_block_cb,
4848                                                  priv, priv, true);
4849        case TC_SETUP_QDISC_CBS:
4850                return stmmac_tc_setup_cbs(priv, priv, type_data);
4851        case TC_SETUP_QDISC_TAPRIO:
4852                return stmmac_tc_setup_taprio(priv, priv, type_data);
4853        case TC_SETUP_QDISC_ETF:
4854                return stmmac_tc_setup_etf(priv, priv, type_data);
4855        default:
4856                return -EOPNOTSUPP;
4857        }
4858}
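
/*
 * Editor's note: these hooks back the kernel tc offload interfaces.
 * An illustrative userspace sequence for the CBS offload handled by
 * TC_SETUP_QDISC_CBS (interface name and numbers are only examples,
 * adapted from the tc-cbs(8) man page, not validated on this driver):
 *
 *      tc qdisc add dev eth0 handle 100: parent root mqprio num_tc 3 \
 *              map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *              queues 1@0 1@1 2@2 hw 0
 *      tc qdisc replace dev eth0 parent 100:1 cbs \
 *              idleslope 20000 sendslope -980000 hicredit 30 \
 *              locredit -1470 offload 1
 *
 * With offload 1 the cbs qdisc calls ndo_setup_tc(TC_SETUP_QDISC_CBS),
 * which lands in stmmac_tc_setup_cbs() above.
 */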
4859
4860static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
4861                               struct net_device *sb_dev)
4862{
4863        int gso = skb_shinfo(skb)->gso_type;
4864
4865        if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4866                /*
4867                 * There is no way to determine the number of TSO/USO
4868                 * capable Queues. Always use Queue 0 because if
4869                 * TSO/USO is supported then at least this one will
4870                 * be capable.
4871                 */
4872                return 0;
4873        }
4874
4875        return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
4876}
4877
4878static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
4879{
4880        struct stmmac_priv *priv = netdev_priv(ndev);
4881        int ret = 0;
4882
4883        ret = pm_runtime_get_sync(priv->device);
4884        if (ret < 0) {
4885                pm_runtime_put_noidle(priv->device);
4886                return ret;
4887        }
4888
4889        ret = eth_mac_addr(ndev, addr);
4890        if (ret)
4891                goto set_mac_error;
4892
4893        stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
4894
4895set_mac_error:
4896        pm_runtime_put(priv->device);
4897
4898        return ret;
4899}
4900
4901#ifdef CONFIG_DEBUG_FS
4902static struct dentry *stmmac_fs_dir;
4903
4904static void sysfs_display_ring(void *head, int size, int extend_desc,
4905                               struct seq_file *seq, dma_addr_t dma_phy_addr)
4906{
4907        int i;
4908        struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
4909        struct dma_desc *p = (struct dma_desc *)head;
4910        dma_addr_t dma_addr;
4911
4912        for (i = 0; i < size; i++) {
4913                if (extend_desc) {
4914                        dma_addr = dma_phy_addr + i * sizeof(*ep);
4915                        seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4916                                   i, &dma_addr,
4917                                   le32_to_cpu(ep->basic.des0),
4918                                   le32_to_cpu(ep->basic.des1),
4919                                   le32_to_cpu(ep->basic.des2),
4920                                   le32_to_cpu(ep->basic.des3));
4921                        ep++;
4922                } else {
4923                        dma_addr = dma_phy_addr + i * sizeof(*p);
4924                        seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
4925                                   i, &dma_addr,
4926                                   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
4927                                   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
4928                        p++;
4929                }
4930                seq_printf(seq, "\n");
4931        }
4932}
4933
4934static int stmmac_rings_status_show(struct seq_file *seq, void *v)
4935{
4936        struct net_device *dev = seq->private;
4937        struct stmmac_priv *priv = netdev_priv(dev);
4938        u32 rx_count = priv->plat->rx_queues_to_use;
4939        u32 tx_count = priv->plat->tx_queues_to_use;
4940        u32 queue;
4941
4942        if ((dev->flags & IFF_UP) == 0)
4943                return 0;
4944
4945        for (queue = 0; queue < rx_count; queue++) {
4946                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4947
4948                seq_printf(seq, "RX Queue %d:\n", queue);
4949
4950                if (priv->extend_desc) {
4951                        seq_printf(seq, "Extended descriptor ring:\n");
4952                        sysfs_display_ring((void *)rx_q->dma_erx,
4953                                           priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
4954                } else {
4955                        seq_printf(seq, "Descriptor ring:\n");
4956                        sysfs_display_ring((void *)rx_q->dma_rx,
4957                                           priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
4958                }
4959        }
4960
4961        for (queue = 0; queue < tx_count; queue++) {
4962                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4963
4964                seq_printf(seq, "TX Queue %d:\n", queue);
4965
4966                if (priv->extend_desc) {
4967                        seq_printf(seq, "Extended descriptor ring:\n");
4968                        sysfs_display_ring((void *)tx_q->dma_etx,
4969                                           priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
4970                } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
4971                        seq_printf(seq, "Descriptor ring:\n");
4972                        sysfs_display_ring((void *)tx_q->dma_tx,
4973                                           priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
4974                }
4975        }
4976
4977        return 0;
4978}
4979DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
4980
4981static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
4982{
4983        struct net_device *dev = seq->private;
4984        struct stmmac_priv *priv = netdev_priv(dev);
4985
4986        if (!priv->hw_cap_support) {
4987                seq_printf(seq, "DMA HW features not supported\n");
4988                return 0;
4989        }
4990
4991        seq_printf(seq, "==============================\n");
4992        seq_printf(seq, "\tDMA HW features\n");
4993        seq_printf(seq, "==============================\n");
4994
4995        seq_printf(seq, "\t10/100 Mbps: %s\n",
4996                   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
4997        seq_printf(seq, "\t1000 Mbps: %s\n",
4998                   (priv->dma_cap.mbps_1000) ? "Y" : "N");
4999        seq_printf(seq, "\tHalf duplex: %s\n",
5000                   (priv->dma_cap.half_duplex) ? "Y" : "N");
5001        seq_printf(seq, "\tHash Filter: %s\n",
5002                   (priv->dma_cap.hash_filter) ? "Y" : "N");
5003        seq_printf(seq, "\tMultiple MAC address registers: %s\n",
5004                   (priv->dma_cap.multi_addr) ? "Y" : "N");
5005        seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
5006                   (priv->dma_cap.pcs) ? "Y" : "N");
5007        seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
5008                   (priv->dma_cap.sma_mdio) ? "Y" : "N");
5009        seq_printf(seq, "\tPMT Remote wake up: %s\n",
5010                   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
5011        seq_printf(seq, "\tPMT Magic Frame: %s\n",
5012                   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
5013        seq_printf(seq, "\tRMON module: %s\n",
5014                   (priv->dma_cap.rmon) ? "Y" : "N");
5015        seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
5016                   (priv->dma_cap.time_stamp) ? "Y" : "N");
5017        seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
5018                   (priv->dma_cap.atime_stamp) ? "Y" : "N");
5019        seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
5020                   (priv->dma_cap.eee) ? "Y" : "N");
5021        seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
5022        seq_printf(seq, "\tChecksum Offload in TX: %s\n",
5023                   (priv->dma_cap.tx_coe) ? "Y" : "N");
5024        if (priv->synopsys_id >= DWMAC_CORE_4_00) {
5025                seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
5026                           (priv->dma_cap.rx_coe) ? "Y" : "N");
5027        } else {
5028                seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
5029                           (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
5030                seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
5031                           (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
5032        }
5033        seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
5034                   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
5035        seq_printf(seq, "\tNumber of Additional RX channels: %d\n",
5036                   priv->dma_cap.number_rx_channel);
5037        seq_printf(seq, "\tNumber of Additional TX channels: %d\n",
5038                   priv->dma_cap.number_tx_channel);
5039        seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
5040                   priv->dma_cap.number_rx_queues);
5041        seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
5042                   priv->dma_cap.number_tx_queues);
5043        seq_printf(seq, "\tEnhanced descriptors: %s\n",
5044                   (priv->dma_cap.enh_desc) ? "Y" : "N");
5045        seq_printf(seq, "\tTX FIFO Size: %d\n", priv->dma_cap.tx_fifo_size);
5046        seq_printf(seq, "\tRX FIFO Size: %d\n", priv->dma_cap.rx_fifo_size);
5047        seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
5048        seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
5049        seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
5050                   priv->dma_cap.pps_out_num);
5051        seq_printf(seq, "\tSafety Features: %s\n",
5052                   priv->dma_cap.asp ? "Y" : "N");
5053        seq_printf(seq, "\tFlexible RX Parser: %s\n",
5054                   priv->dma_cap.frpsel ? "Y" : "N");
5055        seq_printf(seq, "\tEnhanced Addressing: %d\n",
5056                   priv->dma_cap.addr64);
5057        seq_printf(seq, "\tReceive Side Scaling: %s\n",
5058                   priv->dma_cap.rssen ? "Y" : "N");
5059        seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
5060                   priv->dma_cap.vlhash ? "Y" : "N");
5061        seq_printf(seq, "\tSplit Header: %s\n",
5062                   priv->dma_cap.sphen ? "Y" : "N");
5063        seq_printf(seq, "\tVLAN TX Insertion: %s\n",
5064                   priv->dma_cap.vlins ? "Y" : "N");
5065        seq_printf(seq, "\tDouble VLAN: %s\n",
5066                   priv->dma_cap.dvlan ? "Y" : "N");
5067        seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
5068                   priv->dma_cap.l3l4fnum);
5069        seq_printf(seq, "\tARP Offloading: %s\n",
5070                   priv->dma_cap.arpoffsel ? "Y" : "N");
5071        seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
5072                   priv->dma_cap.estsel ? "Y" : "N");
5073        seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
5074                   priv->dma_cap.fpesel ? "Y" : "N");
5075        seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
5076                   priv->dma_cap.tbssel ? "Y" : "N");
5077        return 0;
5078}
5079DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
5080
5081/* Use network device events to rename debugfs file entries.
5082 */
5083static int stmmac_device_event(struct notifier_block *unused,
5084                               unsigned long event, void *ptr)
5085{
5086        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5087        struct stmmac_priv *priv = netdev_priv(dev);
5088
5089        if (dev->netdev_ops != &stmmac_netdev_ops)
5090                goto done;
5091
5092        switch (event) {
5093        case NETDEV_CHANGENAME:
5094                if (priv->dbgfs_dir)
5095                        priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
5096                                                         priv->dbgfs_dir,
5097                                                         stmmac_fs_dir,
5098                                                         dev->name);
5099                break;
5100        }
5101done:
5102        return NOTIFY_DONE;
5103}
5104
5105static struct notifier_block stmmac_notifier = {
5106        .notifier_call = stmmac_device_event,
5107};
5108
5109static void stmmac_init_fs(struct net_device *dev)
5110{
5111        struct stmmac_priv *priv = netdev_priv(dev);
5112
5113        rtnl_lock();
5114
5115        /* Create per netdev entries */
5116        priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
5117
5118        /* Entry to report DMA RX/TX rings */
5119        debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
5120                            &stmmac_rings_status_fops);
5121
5122        /* Entry to report the DMA HW features */
5123        debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
5124                            &stmmac_dma_cap_fops);
5125
5126        rtnl_unlock();
5127}
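
/*
 * Editor's note: with debugfs mounted in the usual place, the two
 * entries created above can be read from userspace, e.g. (assuming the
 * driver root directory keeps its default "stmmaceth" name and the
 * interface is eth0):
 *
 *      cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *      cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */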
5128
5129static void stmmac_exit_fs(struct net_device *dev)
5130{
5131        struct stmmac_priv *priv = netdev_priv(dev);
5132
5133        debugfs_remove_recursive(priv->dbgfs_dir);
5134}
5135#endif /* CONFIG_DEBUG_FS */
5136
5137static u32 stmmac_vid_crc32_le(__le16 vid_le)
5138{
5139        unsigned char *data = (unsigned char *)&vid_le;
5140        unsigned char data_byte = 0;
5141        u32 crc = ~0x0;
5142        u32 temp = 0;
5143        int i, bits;
5144
5145        bits = get_bitmask_order(VLAN_VID_MASK);
5146        for (i = 0; i < bits; i++) {
5147                if ((i % 8) == 0)
5148                        data_byte = data[i / 8];
5149
5150                temp = ((crc & 1) ^ data_byte) & 1;
5151                crc >>= 1;
5152                data_byte >>= 1;
5153
5154                if (temp)
5155                        crc ^= 0xedb88320;
5156        }
5157
5158        return crc;
5159}
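
/*
 * Editor's note: only the low 12 VID bits (get_bitmask_order() of
 * VLAN_VID_MASK) are fed through this reflected CRC-32 (polynomial
 * 0xedb88320).  The caller below then reduces the result to a 4-bit
 * bin index, so every VID sets one of 16 bits in the hardware VLAN
 * hash filter:
 *
 *      crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
 *      hash |= (1 << crc);
 */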
5160
5161static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
5162{
5163        u32 crc, hash = 0;
5164        __le16 pmatch = 0;
5165        int count = 0;
5166        u16 vid = 0;
5167
5168        for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
5169                __le16 vid_le = cpu_to_le16(vid);
5170                crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
5171                hash |= (1 << crc);
5172                count++;
5173        }
5174
5175        if (!priv->dma_cap.vlhash) {
5176                if (count > 2) /* VID = 0 always passes filter */
5177                        return -EOPNOTSUPP;
5178
5179                pmatch = cpu_to_le16(vid);
5180                hash = 0;
5181        }
5182
5183        return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
5184}
5185
5186static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
5187{
5188        struct stmmac_priv *priv = netdev_priv(ndev);
5189        bool is_double = false;
5190        int ret;
5191
5192        if (be16_to_cpu(proto) == ETH_P_8021AD)
5193                is_double = true;
5194
5195        set_bit(vid, priv->active_vlans);
5196        ret = stmmac_vlan_update(priv, is_double);
5197        if (ret) {
5198                clear_bit(vid, priv->active_vlans);
5199                return ret;
5200        }
5201
5202        if (priv->hw->num_vlan) {
5203                ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
5204                if (ret)
5205                        return ret;
5206        }
5207
5208        return 0;
5209}
5210
5211static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
5212{
5213        struct stmmac_priv *priv = netdev_priv(ndev);
5214        bool is_double = false;
5215        int ret;
5216
5217        ret = pm_runtime_get_sync(priv->device);
5218        if (ret < 0) {
5219                pm_runtime_put_noidle(priv->device);
5220                return ret;
5221        }
5222
5223        if (be16_to_cpu(proto) == ETH_P_8021AD)
5224                is_double = true;
5225
5226        clear_bit(vid, priv->active_vlans);
5227
5228        if (priv->hw->num_vlan) {
5229                ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
5230                if (ret)
5231                        goto del_vlan_error;
5232        }
5233
5234        ret = stmmac_vlan_update(priv, is_double);
5235
5236del_vlan_error:
5237        pm_runtime_put(priv->device);
5238
5239        return ret;
5240}
5241
5242static const struct net_device_ops stmmac_netdev_ops = {
5243        .ndo_open = stmmac_open,
5244        .ndo_start_xmit = stmmac_xmit,
5245        .ndo_stop = stmmac_release,
5246        .ndo_change_mtu = stmmac_change_mtu,
5247        .ndo_fix_features = stmmac_fix_features,
5248        .ndo_set_features = stmmac_set_features,
5249        .ndo_set_rx_mode = stmmac_set_rx_mode,
5250        .ndo_tx_timeout = stmmac_tx_timeout,
5251        .ndo_do_ioctl = stmmac_ioctl,
5252        .ndo_setup_tc = stmmac_setup_tc,
5253        .ndo_select_queue = stmmac_select_queue,
5254#ifdef CONFIG_NET_POLL_CONTROLLER
5255        .ndo_poll_controller = stmmac_poll_controller,
5256#endif
5257        .ndo_set_mac_address = stmmac_set_mac_address,
5258        .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
5259        .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
5260};
5261
5262static void stmmac_reset_subtask(struct stmmac_priv *priv)
5263{
5264        if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
5265                return;
5266        if (test_bit(STMMAC_DOWN, &priv->state))
5267                return;
5268
5269        netdev_err(priv->dev, "Reset adapter.\n");
5270
5271        rtnl_lock();
5272        netif_trans_update(priv->dev);
5273        while (test_and_set_bit(STMMAC_RESETING, &priv->state))
5274                usleep_range(1000, 2000);
5275
5276        set_bit(STMMAC_DOWN, &priv->state);
5277        dev_close(priv->dev);
5278        dev_open(priv->dev, NULL);
5279        clear_bit(STMMAC_DOWN, &priv->state);
5280        clear_bit(STMMAC_RESETING, &priv->state);
5281        rtnl_unlock();
5282}
5283
5284static void stmmac_service_task(struct work_struct *work)
5285{
5286        struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
5287                        service_task);
5288
5289        stmmac_reset_subtask(priv);
5290        clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
5291}
5292
5293/**
5294 *  stmmac_hw_init - Init the MAC device
5295 *  @priv: driver private structure
5296 *  Description: this function is to configure the MAC device according to
5297 *  some platform parameters or the HW capability register. It prepares the
5298 *  driver to use either ring or chain modes and to setup either enhanced or
5299 *  normal descriptors.
5300 */
5301static int stmmac_hw_init(struct stmmac_priv *priv)
5302{
5303        int ret;
5304
5305        /* dwmac-sun8i only works in chain mode */
5306        if (priv->plat->has_sun8i)
5307                chain_mode = 1;
5308        priv->chain_mode = chain_mode;
5309
5310        /* Initialize HW Interface */
5311        ret = stmmac_hwif_init(priv);
5312        if (ret)
5313                return ret;
5314
5315        /* Get the HW capability (available on GMAC cores newer than 3.50a) */
5316        priv->hw_cap_support = stmmac_get_hw_features(priv);
5317        if (priv->hw_cap_support) {
5318                dev_info(priv->device, "DMA HW capability register supported\n");
5319
5320                /* We can override some gmac/dma configuration fields
5321                 * (e.g. enh_desc, tx_coe) that are passed through the
5322                 * platform with the values from the HW capability
5323                 * register (if supported).
5324                 */
5325                priv->plat->enh_desc = priv->dma_cap.enh_desc;
5326                priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
5327                priv->hw->pmt = priv->plat->pmt;
5328                if (priv->dma_cap.hash_tb_sz) {
5329                        priv->hw->multicast_filter_bins =
5330                                        (BIT(priv->dma_cap.hash_tb_sz) << 5);
5331                        priv->hw->mcast_bits_log2 =
5332                                        ilog2(priv->hw->multicast_filter_bins);
5333                }
5334
5335                /* TXCOE doesn't work in thresh DMA mode */
5336                if (priv->plat->force_thresh_dma_mode)
5337                        priv->plat->tx_coe = 0;
5338                else
5339                        priv->plat->tx_coe = priv->dma_cap.tx_coe;
5340
5341                /* In case of GMAC4 rx_coe is from HW cap register. */
5342                priv->plat->rx_coe = priv->dma_cap.rx_coe;
5343
5344                if (priv->dma_cap.rx_coe_type2)
5345                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
5346                else if (priv->dma_cap.rx_coe_type1)
5347                        priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
5348
5349        } else {
5350                dev_info(priv->device, "No HW DMA feature register supported\n");
5351        }
5352
5353        if (priv->plat->rx_coe) {
5354                priv->hw->rx_csum = priv->plat->rx_coe;
5355                dev_info(priv->device, "RX Checksum Offload Engine supported\n");
5356                if (priv->synopsys_id < DWMAC_CORE_4_00)
5357                        dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
5358        }
5359        if (priv->plat->tx_coe)
5360                dev_info(priv->device, "TX Checksum insertion supported\n");
5361
5362        if (priv->plat->pmt) {
5363                dev_info(priv->device, "Wake-Up On LAN supported\n");
5364                device_set_wakeup_capable(priv->device, 1);
5365        }
5366
5367        if (priv->dma_cap.tsoen)
5368                dev_info(priv->device, "TSO supported\n");
5369
5370        priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
5371        priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
5372
5373        /* Run HW quirks, if any */
5374        if (priv->hwif_quirks) {
5375                ret = priv->hwif_quirks(priv);
5376                if (ret)
5377                        return ret;
5378        }
5379
5380        /* Rx Watchdog is available in cores newer than 3.40.
5381         * In some cases, for example on buggy HW, this feature
5382         * has to be disabled; this can be done by passing the
5383         * riwt_off field from the platform.
5384         */
5385        if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
5386            (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
5387                priv->use_riwt = 1;
5388                dev_info(priv->device,
5389                         "Enable RX Mitigation via HW Watchdog Timer\n");
5390        }
5391
5392        return 0;
5393}
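
/*
 * Editor's note: a worked example of the hash-table sizing in
 * stmmac_hw_init() above: with dma_cap.hash_tb_sz = 2,
 * multicast_filter_bins = BIT(2) << 5 = 128 and mcast_bits_log2 =
 * ilog2(128) = 7, i.e. seven bits of the computed address hash select
 * one of 128 multicast filter bins.
 */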
5394
5395static void stmmac_napi_add(struct net_device *dev)
5396{
5397        struct stmmac_priv *priv = netdev_priv(dev);
5398        u32 queue, maxq;
5399
5400        maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
5401
5402        for (queue = 0; queue < maxq; queue++) {
5403                struct stmmac_channel *ch = &priv->channel[queue];
5404
5405                ch->priv_data = priv;
5406                ch->index = queue;
5407                spin_lock_init(&ch->lock);
5408
5409                if (queue < priv->plat->rx_queues_to_use) {
5410                        netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
5411                                       NAPI_POLL_WEIGHT);
5412                }
5413                if (queue < priv->plat->tx_queues_to_use) {
5414                        netif_tx_napi_add(dev, &ch->tx_napi,
5415                                          stmmac_napi_poll_tx,
5416                                          NAPI_POLL_WEIGHT);
5417                }
5418        }
5419}
5420
5421static void stmmac_napi_del(struct net_device *dev)
5422{
5423        struct stmmac_priv *priv = netdev_priv(dev);
5424        u32 queue, maxq;
5425
5426        maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
5427
5428        for (queue = 0; queue < maxq; queue++) {
5429                struct stmmac_channel *ch = &priv->channel[queue];
5430
5431                if (queue < priv->plat->rx_queues_to_use)
5432                        netif_napi_del(&ch->rx_napi);
5433                if (queue < priv->plat->tx_queues_to_use)
5434                        netif_napi_del(&ch->tx_napi);
5435        }
5436}
5437
5438int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
5439{
5440        struct stmmac_priv *priv = netdev_priv(dev);
5441        int ret = 0;
5442
5443        if (netif_running(dev))
5444                stmmac_release(dev);
5445
5446        stmmac_napi_del(dev);
5447
5448        priv->plat->rx_queues_to_use = rx_cnt;
5449        priv->plat->tx_queues_to_use = tx_cnt;
5450
5451        stmmac_napi_add(dev);
5452
5453        if (netif_running(dev))
5454                ret = stmmac_open(dev);
5455
5456        return ret;
5457}
5458
5459int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
5460{
5461        struct stmmac_priv *priv = netdev_priv(dev);
5462        int ret = 0;
5463
5464        if (netif_running(dev))
5465                stmmac_release(dev);
5466
5467        priv->dma_rx_size = rx_size;
5468        priv->dma_tx_size = tx_size;
5469
5470        if (netif_running(dev))
5471                ret = stmmac_open(dev);
5472
5473        return ret;
5474}
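
/*
 * Editor's note: the two reinit helpers above back the ethtool channel
 * and ring-size controls; from userspace, e.g.:
 *
 *      ethtool -L eth0 rx 4 tx 4         # -> stmmac_reinit_queues()
 *      ethtool -G eth0 rx 1024 tx 1024   # -> stmmac_reinit_ringparam()
 *
 * Both paths close and reopen a running interface around the change,
 * since the queue and ring layout cannot change while DMA is active.
 */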
5475
5476#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
5477static void stmmac_fpe_lp_task(struct work_struct *work)
5478{
5479        struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
5480                                                fpe_task);
5481        struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5482        enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5483        enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5484        bool *hs_enable = &fpe_cfg->hs_enable;
5485        bool *enable = &fpe_cfg->enable;
5486        int retries = 20;
5487
5488        while (retries-- > 0) {
5489                /* Bail out immediately if FPE handshake is OFF */
5490                if (*lo_state == FPE_STATE_OFF || !*hs_enable)
5491                        break;
5492
5493                if (*lo_state == FPE_STATE_ENTERING_ON &&
5494                    *lp_state == FPE_STATE_ENTERING_ON) {
5495                        stmmac_fpe_configure(priv, priv->ioaddr,
5496                                             priv->plat->tx_queues_to_use,
5497                                             priv->plat->rx_queues_to_use,
5498                                             *enable);
5499
5500                        netdev_info(priv->dev, "configured FPE\n");
5501
5502                        *lo_state = FPE_STATE_ON;
5503                        *lp_state = FPE_STATE_ON;
5504                        netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
5505                        break;
5506                }
5507
5508                if ((*lo_state == FPE_STATE_CAPABLE ||
5509                     *lo_state == FPE_STATE_ENTERING_ON) &&
5510                     *lp_state != FPE_STATE_ON) {
5511                        netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
5512                                    *lo_state, *lp_state);
5513                        stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5514                                                MPACKET_VERIFY);
5515                }
5516                /* Sleep then retry */
5517                msleep(500);
5518        }
5519
5520        clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
5521}
5522
5523void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
5524{
5525        if (priv->plat->fpe_cfg->hs_enable != enable) {
5526                if (enable) {
5527                        stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5528                                                MPACKET_VERIFY);
5529                } else {
5530                        priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
5531                        priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
5532                }
5533
5534                priv->plat->fpe_cfg->hs_enable = enable;
5535        }
5536}
5537
5538/**
5539 * stmmac_dvr_probe
5540 * @device: device pointer
5541 * @plat_dat: platform data pointer
5542 * @res: stmmac resource pointer
5543 * Description: this is the main probe function used to
5544 * call the alloc_etherdev, allocate the priv structure.
5545 * Return:
5546 * returns 0 on success, otherwise errno.
5547 */
5548int stmmac_dvr_probe(struct device *device,
5549                     struct plat_stmmacenet_data *plat_dat,
5550                     struct stmmac_resources *res)
5551{
5552        struct net_device *ndev = NULL;
5553        struct stmmac_priv *priv;
5554        u32 rxq;
5555        int i, ret = 0;
5556
5557        ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
5558                                       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
5559        if (!ndev)
5560                return -ENOMEM;
5561
5562        SET_NETDEV_DEV(ndev, device);
5563
5564        priv = netdev_priv(ndev);
5565        priv->device = device;
5566        priv->dev = ndev;
5567
5568        stmmac_set_ethtool_ops(ndev);
5569        priv->pause = pause;
5570        priv->plat = plat_dat;
5571        priv->ioaddr = res->addr;
5572        priv->dev->base_addr = (unsigned long)res->addr;
5573        priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
5574
5575        priv->dev->irq = res->irq;
5576        priv->wol_irq = res->wol_irq;
5577        priv->lpi_irq = res->lpi_irq;
5578        priv->sfty_ce_irq = res->sfty_ce_irq;
5579        priv->sfty_ue_irq = res->sfty_ue_irq;
5580        for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
5581                priv->rx_irq[i] = res->rx_irq[i];
5582        for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
5583                priv->tx_irq[i] = res->tx_irq[i];
5584
5585        if (!is_zero_ether_addr(res->mac))
5586                memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
5587
5588        dev_set_drvdata(device, priv->dev);
5589
5590        /* Verify driver arguments */
5591        stmmac_verify_args();
5592
5593        /* Allocate workqueue */
5594        priv->wq = create_singlethread_workqueue("stmmac_wq");
5595        if (!priv->wq) {
5596                dev_err(priv->device, "failed to create workqueue\n");
5597                return -ENOMEM;
5598        }
5599
5600        INIT_WORK(&priv->service_task, stmmac_service_task);
5601
5602        /* Initialize Link Partner FPE workqueue */
5603        INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
5604
5605        /* Override with kernel parameters if supplied XXX CRS XXX
5606         * this needs to have multiple instances
5607         */
5608        if ((phyaddr >= 0) && (phyaddr <= 31))
5609                priv->plat->phy_addr = phyaddr;
5610
5611        if (priv->plat->stmmac_rst) {
5612                ret = reset_control_assert(priv->plat->stmmac_rst);
5613                reset_control_deassert(priv->plat->stmmac_rst);
5614                /* Some reset controllers have only a reset callback instead
5615                 * of the assert + deassert callbacks pair.
5616                 */
5617                if (ret == -ENOTSUPP)
5618                        reset_control_reset(priv->plat->stmmac_rst);
5619        }
5620
5621        /* Init MAC and get the capabilities */
5622        ret = stmmac_hw_init(priv);
5623        if (ret)
5624                goto error_hw_init;
5625
5626        /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
5627         */
5628        if (priv->synopsys_id < DWMAC_CORE_5_20)
5629                priv->plat->dma_cfg->dche = false;
5630
5631        stmmac_check_ether_addr(priv);
5632
5633        ndev->netdev_ops = &stmmac_netdev_ops;
5634
5635        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5636                            NETIF_F_RXCSUM;
5637
5638        ret = stmmac_tc_init(priv, priv);
5639        if (!ret)
5640                ndev->hw_features |= NETIF_F_HW_TC;
5642
5643        if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5644                ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
5645                if (priv->plat->has_gmac4)
5646                        ndev->hw_features |= NETIF_F_GSO_UDP_L4;
5647                priv->tso = true;
5648                dev_info(priv->device, "TSO feature enabled\n");
5649        }
5650
5651        if (priv->dma_cap.sphen) {
5652                ndev->hw_features |= NETIF_F_GRO;
5653                priv->sph_cap = true;
5654                priv->sph = priv->sph_cap;
5655                dev_info(priv->device, "SPH feature enabled\n");
5656        }
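            /* SPH (Split Header) lets the DMA place L3/L4 headers and the
             * payload in separate buffers, which is why it pairs naturally
             * with the GRO feature flag set above.
             */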
5657
5658        /* The current IP register MAC_HW_Feature1[ADDR64] only defines
5659         * 32/40/64 bit widths, but some SoCs support other widths: e.g. the
5660         * i.MX8MP supports 34 bits, which maps to 40 bits in this field.
5661         * So override dma_cap.addr64 to match the real HW design.
5662         */
5663        if (priv->plat->addr64)
5664                priv->dma_cap.addr64 = priv->plat->addr64;
5665
5666        if (priv->dma_cap.addr64) {
5667                ret = dma_set_mask_and_coherent(device,
5668                                DMA_BIT_MASK(priv->dma_cap.addr64));
5669                if (!ret) {
5670                        dev_info(priv->device, "Using %d bits DMA width\n",
5671                                 priv->dma_cap.addr64);
5672
5673                        /*
5674                         * If more than 32 bits can be addressed, make sure to
5675                         * enable enhanced addressing mode.
5676                         */
5677                        if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
5678                                priv->plat->dma_cfg->eame = true;
5679                } else {
5680                        ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
5681                        if (ret) {
5682                                dev_err(priv->device, "Failed to set DMA Mask\n");
5683                                goto error_hw_init;
5684                        }
5685
5686                        priv->dma_cap.addr64 = 32;
5687                }
5688        }
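            /* Illustrative example: a design reporting addr64 == 40 calls
             * dma_set_mask_and_coherent(device, DMA_BIT_MASK(40)), i.e. mask
             * 0xFFFFFFFFFF, allowing up to 1 TiB of DMA-addressable memory.
             */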
5689
5690        ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
5691        ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
5692#ifdef STMMAC_VLAN_TAG_USED
5693        /* Both mac100 and gmac support receive VLAN tag detection */
5694        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
5695        if (priv->dma_cap.vlhash) {
5696                ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
5697                ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
5698        }
5699        if (priv->dma_cap.vlins) {
5700                ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
5701                if (priv->dma_cap.dvlan)
5702                        ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
5703        }
5704#endif
5705        priv->msg_enable = netif_msg_init(debug, default_msg_level);
5706
5707        /* Initialize RSS */
5708        rxq = priv->plat->rx_queues_to_use;
5709        netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
5710        for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
5711                priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
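            /* ethtool_rxfh_indir_default(i, rxq) spreads flows round-robin
             * across the RX queues, i.e. table[i] = i % rxq.
             */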
5712
5713        if (priv->dma_cap.rssen && priv->plat->rss_en)
5714                ndev->features |= NETIF_F_RXHASH;
5715
5716        /* MTU range: 46 - hw-specific max */
5717        ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
5718        if (priv->plat->has_xgmac)
5719                ndev->max_mtu = XGMAC_JUMBO_LEN;
5720        else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
5721                ndev->max_mtu = JUMBO_LEN;
5722        else
5723                ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
5724        /* Do not overwrite ndev->max_mtu when plat->maxmtu is larger than
5725         * it or smaller than ndev->min_mtu, which is an invalid range.
5726         */
5727        if ((priv->plat->maxmtu < ndev->max_mtu) &&
5728            (priv->plat->maxmtu >= ndev->min_mtu))
5729                ndev->max_mtu = priv->plat->maxmtu;
5730        else if (priv->plat->maxmtu < ndev->min_mtu)
5731                dev_warn(priv->device,
5732                         "%s: warning: maxmtu has an invalid value (%d)\n",
5733                         __func__, priv->plat->maxmtu);
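            /* As a sketch of the arithmetic: ETH_ZLEN (60) minus ETH_HLEN (14)
             * yields the 46 byte minimum noted above, while the maximum
             * depends on the core type, optionally clamped by a platform
             * maxmtu.
             */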
5734
5735        if (flow_ctrl)
5736                priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
5737
5738        /* Setup channels NAPI */
5739        stmmac_napi_add(ndev);
5740
5741        mutex_init(&priv->lock);
5742
5743        /* If a specific clk_csr value is passed from the platform, the
5744         * CSR Clock Range selection is fixed and cannot be changed at
5745         * run-time. Otherwise the driver tries to set the MDC clock
5746         * dynamically, according to the actual CSR clock input.
5747         */
5749        if (priv->plat->clk_csr >= 0)
5750                priv->clk_csr = priv->plat->clk_csr;
5751        else
5752                stmmac_clk_csr_set(priv);
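            /* stmmac_clk_csr_set() maps the measured CSR clock onto one of
             * the STMMAC_CSR_* ranges; e.g. a 66 MHz clock is assumed to land
             * in the 60-100 MHz range, which selects an MDC divider around
             * /42.
             */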
5753
5754        stmmac_check_pcs_mode(priv);
5755
5756        pm_runtime_get_noresume(device);
5757        pm_runtime_set_active(device);
5758        pm_runtime_enable(device);
5759
5760        if (priv->hw->pcs != STMMAC_PCS_TBI &&
5761            priv->hw->pcs != STMMAC_PCS_RTBI) {
5762                /* MDIO bus Registration */
5763                ret = stmmac_mdio_register(ndev);
5764                if (ret < 0) {
5765                        dev_err(priv->device,
5766                                "%s: MDIO bus (id: %d) registration failed",
5767                                __func__, priv->plat->bus_id);
5768                        goto error_mdio_register;
5769                }
5770        }
5771
5772        ret = stmmac_phy_setup(priv);
5773        if (ret) {
5774                netdev_err(ndev, "failed to setup phy (%d)\n", ret);
5775                goto error_phy_setup;
5776        }
5777
5778        ret = register_netdev(ndev);
5779        if (ret) {
5780                dev_err(priv->device, "%s: ERROR %i registering the device\n",
5781                        __func__, ret);
5782                goto error_netdev_register;
5783        }
5784
5785        if (priv->plat->serdes_powerup) {
5786                ret = priv->plat->serdes_powerup(ndev,
5787                                                 priv->plat->bsp_priv);
5788
5789                if (ret < 0)
5790                        goto error_serdes_powerup;
5791        }
5792
5793#ifdef CONFIG_DEBUG_FS
5794        stmmac_init_fs(ndev);
5795#endif
5796
5797        /* Let pm_runtime_put() disable the clocks.
5798         * If CONFIG_PM is not enabled, the clocks will stay powered.
5799         */
5800        pm_runtime_put(device);
5801
5802        return ret;
5803
5804error_serdes_powerup:
5805        unregister_netdev(ndev);
5806error_netdev_register:
5807        phylink_destroy(priv->phylink);
5808error_phy_setup:
5809        if (priv->hw->pcs != STMMAC_PCS_TBI &&
5810            priv->hw->pcs != STMMAC_PCS_RTBI)
5811                stmmac_mdio_unregister(ndev);
5812error_mdio_register:
5813        stmmac_napi_del(ndev);
5814error_hw_init:
5815        destroy_workqueue(priv->wq);
5816
5817        return ret;
5818}
5819EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
5820
5821/**
5822 * stmmac_dvr_remove - remove the driver
5823 * @dev: device pointer
5824 * Description: this function resets the TX/RX processes, disables the MAC
5825 * RX/TX, changes the link status and releases the DMA descriptor rings.
5826 */
5827int stmmac_dvr_remove(struct device *dev)
5828{
5829        struct net_device *ndev = dev_get_drvdata(dev);
5830        struct stmmac_priv *priv = netdev_priv(ndev);
5831
5832        netdev_info(priv->dev, "%s: removing driver", __func__);
5833
5834        stmmac_stop_all_dma(priv);
5835        stmmac_mac_set(priv, priv->ioaddr, false);
5836        netif_carrier_off(ndev);
5837        unregister_netdev(ndev);
5838
5839        /* Serdes power down needs to happen after the VLAN filter
5840         * is deleted, which is triggered by unregister_netdev().
5841         */
5842        if (priv->plat->serdes_powerdown)
5843                priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5844
5845#ifdef CONFIG_DEBUG_FS
5846        stmmac_exit_fs(ndev);
5847#endif
5848        phylink_destroy(priv->phylink);
5849        if (priv->plat->stmmac_rst)
5850                reset_control_assert(priv->plat->stmmac_rst);
5851        pm_runtime_put(dev);
5852        pm_runtime_disable(dev);
5853        if (priv->hw->pcs != STMMAC_PCS_TBI &&
5854            priv->hw->pcs != STMMAC_PCS_RTBI)
5855                stmmac_mdio_unregister(ndev);
5856        destroy_workqueue(priv->wq);
5857        mutex_destroy(&priv->lock);
5858
5859        return 0;
5860}
5861EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
5862
5863/**
5864 * stmmac_suspend - suspend callback
5865 * @dev: device pointer
5866 * Description: this function suspends the device. It is called by the
5867 * platform driver to stop the network queues, program the PMT register
5868 * (for WoL) and clean up and release the driver resources.
5869 */
5870int stmmac_suspend(struct device *dev)
5871{
5872        struct net_device *ndev = dev_get_drvdata(dev);
5873        struct stmmac_priv *priv = netdev_priv(ndev);
5874        u32 chan;
5875        int ret;
5876
5877        if (!ndev || !netif_running(ndev))
5878                return 0;
5879
5880        phylink_mac_change(priv->phylink, false);
5881
5882        mutex_lock(&priv->lock);
5883
5884        netif_device_detach(ndev);
5885
5886        stmmac_disable_all_queues(priv);
5887
5888        for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
5889                hrtimer_cancel(&priv->tx_queue[chan].txtimer);
5890
5891        if (priv->eee_enabled) {
5892                priv->tx_path_in_lpi_mode = false;
5893                del_timer_sync(&priv->eee_ctrl_timer);
5894        }
5895
5896        /* Stop TX/RX DMA */
5897        stmmac_stop_all_dma(priv);
5898
5899        if (priv->plat->serdes_powerdown)
5900                priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
5901
5902        /* Enable Power down mode by programming the PMT regs */
5903        if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5904                stmmac_pmt(priv, priv->hw, priv->wolopts);
5905                priv->irq_wake = 1;
5906        } else {
5907                mutex_unlock(&priv->lock);
5908                rtnl_lock();
5909                if (device_may_wakeup(priv->device))
5910                        phylink_speed_down(priv->phylink, false);
5911                phylink_stop(priv->phylink);
5912                rtnl_unlock();
5913                mutex_lock(&priv->lock);
5914
5915                stmmac_mac_set(priv, priv->ioaddr, false);
5916                pinctrl_pm_select_sleep_state(priv->device);
5917                /* Disable the clock in case PMT is off */
5918                clk_disable_unprepare(priv->plat->clk_ptp_ref);
5919                ret = pm_runtime_force_suspend(dev);
5920                if (ret) {
5921                        mutex_unlock(&priv->lock);
5922                        return ret;
5923                }
5924        }
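            /* In the PMT path above, priv->wolopts carries the WoL flags set
             * via ethtool; for this driver these are typically WAKE_MAGIC
             * and/or WAKE_UCAST.
             */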
5925
5926        mutex_unlock(&priv->lock);
5927
5928        if (priv->dma_cap.fpesel) {
5929                /* Disable FPE */
5930                stmmac_fpe_configure(priv, priv->ioaddr,
5931                                     priv->plat->tx_queues_to_use,
5932                                     priv->plat->rx_queues_to_use, false);
5933
5934                stmmac_fpe_handshake(priv, false);
5935        }
5936
5937        priv->speed = SPEED_UNKNOWN;
5938        return 0;
5939}
5940EXPORT_SYMBOL_GPL(stmmac_suspend);
5941
5942/**
5943 * stmmac_reset_queues_param - reset queue parameters
5944 * @priv: driver private structure
5945 */
5946static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5947{
5948        u32 rx_cnt = priv->plat->rx_queues_to_use;
5949        u32 tx_cnt = priv->plat->tx_queues_to_use;
5950        u32 queue;
5951
5952        for (queue = 0; queue < rx_cnt; queue++) {
5953                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5954
5955                rx_q->cur_rx = 0;
5956                rx_q->dirty_rx = 0;
5957        }
5958
5959        for (queue = 0; queue < tx_cnt; queue++) {
5960                struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5961
5962                tx_q->cur_tx = 0;
5963                tx_q->dirty_tx = 0;
5964                tx_q->mss = 0;
5965        }
5966}
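    /* Clearing tx_q->mss above is assumed to force the first TSO frame sent
     * after a resume to program a fresh MSS context descriptor.
     */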
5967
5968/**
5969 * stmmac_resume - resume callback
5970 * @dev: device pointer
5971 * Description: on resume this function is invoked to set up the DMA and
5972 * CORE in a usable state.
5973 */
5974int stmmac_resume(struct device *dev)
5975{
5976        struct net_device *ndev = dev_get_drvdata(dev);
5977        struct stmmac_priv *priv = netdev_priv(ndev);
5978        int ret;
5979
5980        if (!netif_running(ndev))
5981                return 0;
5982
5983        /* The Power Down bit in the PMT register is cleared
5984         * automatically as soon as a magic packet or a Wake-up frame
5985         * is received. It is still better to clear this bit manually
5986         * because it can cause problems when resuming from another
5987         * device (e.g. a serial console).
5988         */
5989        if (device_may_wakeup(priv->device) && priv->plat->pmt) {
5990                mutex_lock(&priv->lock);
5991                stmmac_pmt(priv, priv->hw, 0);
5992                mutex_unlock(&priv->lock);
5993                priv->irq_wake = 0;
5994        } else {
5995                pinctrl_pm_select_default_state(priv->device);
5996                /* enable the clk previously disabled */
5997                ret = pm_runtime_force_resume(dev);
5998                if (ret)
5999                        return ret;
6000                if (priv->plat->clk_ptp_ref)
6001                        clk_prepare_enable(priv->plat->clk_ptp_ref);
6002                /* reset the phy so that it's ready */
6003                if (priv->mii)
6004                        stmmac_mdio_reset(priv->mii);
6005        }
6006
6007        if (priv->plat->serdes_powerup) {
6008                ret = priv->plat->serdes_powerup(ndev,
6009                                                 priv->plat->bsp_priv);
6010
6011                if (ret < 0)
6012                        return ret;
6013        }
6014
6015        if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
6016                rtnl_lock();
6017                phylink_start(priv->phylink);
6018                /* We may have called phylink_speed_down before */
6019                phylink_speed_up(priv->phylink);
6020                rtnl_unlock();
6021        }
6022
6023        rtnl_lock();
6024        mutex_lock(&priv->lock);
6025
6026        stmmac_reset_queues_param(priv);
6027
6028        stmmac_free_tx_skbufs(priv);
6029        stmmac_clear_descriptors(priv);
6030
6031        stmmac_hw_setup(ndev, false);
6032        stmmac_init_coalesce(priv);
6033        stmmac_set_rx_mode(ndev);
6034
6035        stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
6036
6037        stmmac_enable_all_queues(priv);
6038
6039        mutex_unlock(&priv->lock);
6040        rtnl_unlock();
6041
6042        phylink_mac_change(priv->phylink, true);
6043
6044        netif_device_attach(ndev);
6045
6046        return 0;
6047}
6048EXPORT_SYMBOL_GPL(stmmac_resume);
6049
6050#ifndef MODULE
6051static int __init stmmac_cmdline_opt(char *str)
6052{
6053        char *opt;
6054
6055        if (!str || !*str)
6056                return -EINVAL;
6057        while ((opt = strsep(&str, ",")) != NULL) {
6058                if (!strncmp(opt, "debug:", 6)) {
6059                        if (kstrtoint(opt + 6, 0, &debug))
6060                                goto err;
6061                } else if (!strncmp(opt, "phyaddr:", 8)) {
6062                        if (kstrtoint(opt + 8, 0, &phyaddr))
6063                                goto err;
6064                } else if (!strncmp(opt, "buf_sz:", 7)) {
6065                        if (kstrtoint(opt + 7, 0, &buf_sz))
6066                                goto err;
6067                } else if (!strncmp(opt, "tc:", 3)) {
6068                        if (kstrtoint(opt + 3, 0, &tc))
6069                                goto err;
6070                } else if (!strncmp(opt, "watchdog:", 9)) {
6071                        if (kstrtoint(opt + 9, 0, &watchdog))
6072                                goto err;
6073                } else if (!strncmp(opt, "flow_ctrl:", 10)) {
6074                        if (kstrtoint(opt + 10, 0, &flow_ctrl))
6075                                goto err;
6076                } else if (!strncmp(opt, "pause:", 6)) {
6077                        if (kstrtoint(opt + 6, 0, &pause))
6078                                goto err;
6079                } else if (!strncmp(opt, "eee_timer:", 10)) {
6080                        if (kstrtoint(opt + 10, 0, &eee_timer))
6081                                goto err;
6082                } else if (!strncmp(opt, "chain_mode:", 11)) {
6083                        if (kstrtoint(opt + 11, 0, &chain_mode))
6084                                goto err;
6085                }
6086        }
6087        return 0;
6088
6089err:
6090        pr_err("%s: ERROR broken module parameter conversion", __func__);
6091        return -EINVAL;
6092}
6093
6094__setup("stmmaceth=", stmmac_cmdline_opt);
6095#endif /* MODULE */
6096
6097static int __init stmmac_init(void)
6098{
6099#ifdef CONFIG_DEBUG_FS
6100        /* Create debugfs main directory if it doesn't exist yet */
6101        if (!stmmac_fs_dir)
6102                stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
6103        register_netdevice_notifier(&stmmac_notifier);
6104#endif
6105
6106        return 0;
6107}
6108
6109static void __exit stmmac_exit(void)
6110{
6111#ifdef CONFIG_DEBUG_FS
6112        unregister_netdevice_notifier(&stmmac_notifier);
6113        debugfs_remove_recursive(stmmac_fs_dir);
6114#endif
6115}
6116
6117module_init(stmmac_init)
6118module_exit(stmmac_exit)
6119
6120MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
6121MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
6122MODULE_LICENSE("GPL");
6123