linux/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_net.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>
#include <linux/ptp_classify.h>
#include <linux/net_tstamp.h>
#include <linux/random.h>
#include <net/sock.h>
#include <linux/xilinx_phy.h>
#include <linux/clk.h>

#include "xilinx_axienet.h"

#ifdef CONFIG_XILINX_TSN_PTP
#include "xilinx_tsn_ptp.h"
#include "xilinx_tsn_timer.h"
#endif
/* Descriptor count defines for Tx and Rx DMA - use 2^n for best performance */
#define TX_BD_NUM               64
#define RX_BD_NUM               128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME             "xaxienet"
#define DRIVER_DESCRIPTION      "Xilinx Axi Ethernet driver"
#define DRIVER_VERSION          "1.00a"

#define AXIENET_REGS_N          32
#define AXIENET_TS_HEADER_LEN   8
#define XXVENET_TS_HEADER_LEN   4
#define NS_PER_SEC              1000000000ULL /* Nanoseconds per second */

#define XAE_NUM_QUEUES(lp)      ((lp)->num_queues)
#define for_each_dma_queue(lp, var) \
        for ((var) = 0; (var) < XAE_NUM_QUEUES(lp); (var)++)
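/* A sketch of typical usage, iterating every configured DMA queue:
 *      for_each_dma_queue(lp, i) {
 *              q = lp->dq[i];
 *              ...
 *      }
 */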

#ifdef CONFIG_XILINX_TSN_PTP
int axienet_phc_index = -1;
EXPORT_SYMBOL(axienet_phc_index);
#endif

#ifdef CONFIG_AXIENET_HAS_MCDMA
struct axienet_stat {
        const char *name;
};

static struct axienet_stat axienet_get_strings_stats[] = {
        { "txq0_packets" },
        { "txq0_bytes"   },
        { "rxq0_packets" },
        { "rxq0_bytes"   },
        { "txq1_packets" },
        { "txq1_bytes"   },
        { "rxq1_packets" },
        { "rxq1_bytes"   },
        { "txq2_packets" },
        { "txq2_bytes"   },
        { "rxq2_packets" },
        { "rxq2_bytes"   },
        { "txq3_packets" },
        { "txq3_bytes"   },
        { "rxq3_packets" },
        { "rxq3_bytes"   },
        { "txq4_packets" },
        { "txq4_bytes"   },
        { "rxq4_packets" },
        { "rxq4_bytes"   },
        { "txq5_packets" },
        { "txq5_bytes"   },
        { "rxq5_packets" },
        { "rxq5_bytes"   },
        { "txq6_packets" },
        { "txq6_bytes"   },
        { "rxq6_packets" },
        { "rxq6_bytes"   },
        { "txq7_packets" },
        { "txq7_bytes"   },
        { "rxq7_packets" },
        { "rxq7_bytes"   },
        { "txq8_packets" },
        { "txq8_bytes"   },
        { "rxq8_packets" },
        { "rxq8_bytes"   },
        { "txq9_packets" },
        { "txq9_bytes"   },
        { "rxq9_packets" },
        { "rxq9_bytes"   },
        { "txq10_packets" },
        { "txq10_bytes"   },
        { "rxq10_packets" },
        { "rxq10_bytes"   },
        { "txq11_packets" },
        { "txq11_bytes"   },
        { "rxq11_packets" },
        { "rxq11_bytes"   },
        { "txq12_packets" },
        { "txq12_bytes"   },
        { "rxq12_packets" },
        { "rxq12_bytes"   },
        { "txq13_packets" },
        { "txq13_bytes"   },
        { "rxq13_packets" },
        { "rxq13_bytes"   },
        { "txq14_packets" },
        { "txq14_bytes"   },
        { "rxq14_packets" },
        { "rxq14_bytes"   },
        { "txq15_packets" },
        { "txq15_bytes"   },
        { "rxq15_packets" },
        { "rxq15_bytes"   },
};
#endif

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
        /* Turn on jumbo packet support for both Rx and Tx */
        {
                .opt = XAE_OPTION_JUMBO,
                .reg = XAE_TC_OFFSET,
                .m_or = XAE_TC_JUM_MASK,
        }, {
                .opt = XAE_OPTION_JUMBO,
                .reg = XAE_RCW1_OFFSET,
                .m_or = XAE_RCW1_JUM_MASK,
        }, { /* Turn on VLAN packet support for both Rx and Tx */
                .opt = XAE_OPTION_VLAN,
                .reg = XAE_TC_OFFSET,
                .m_or = XAE_TC_VLAN_MASK,
        }, {
                .opt = XAE_OPTION_VLAN,
                .reg = XAE_RCW1_OFFSET,
                .m_or = XAE_RCW1_VLAN_MASK,
        }, { /* Turn on FCS stripping on receive packets */
                .opt = XAE_OPTION_FCS_STRIP,
                .reg = XAE_RCW1_OFFSET,
                .m_or = XAE_RCW1_FCS_MASK,
        }, { /* Turn on FCS insertion on transmit packets */
                .opt = XAE_OPTION_FCS_INSERT,
                .reg = XAE_TC_OFFSET,
                .m_or = XAE_TC_FCS_MASK,
        }, { /* Turn off length/type field checking on receive packets */
                .opt = XAE_OPTION_LENTYPE_ERR,
                .reg = XAE_RCW1_OFFSET,
                .m_or = XAE_RCW1_LT_DIS_MASK,
        }, { /* Turn on Rx flow control */
                .opt = XAE_OPTION_FLOW_CONTROL,
                .reg = XAE_FCC_OFFSET,
                .m_or = XAE_FCC_FCRX_MASK,
        }, { /* Turn on Tx flow control */
                .opt = XAE_OPTION_FLOW_CONTROL,
                .reg = XAE_FCC_OFFSET,
                .m_or = XAE_FCC_FCTX_MASK,
        }, { /* Turn on promiscuous frame filtering */
                .opt = XAE_OPTION_PROMISC,
                .reg = XAE_FMI_OFFSET,
                .m_or = XAE_FMI_PM_MASK,
        }, { /* Enable transmitter */
                .opt = XAE_OPTION_TXEN,
                .reg = XAE_TC_OFFSET,
                .m_or = XAE_TC_TX_MASK,
        }, { /* Enable receiver */
                .opt = XAE_OPTION_RXEN,
                .reg = XAE_RCW1_OFFSET,
                .m_or = XAE_RCW1_RX_MASK,
        },
        {}
};

/* Option table for setting up Axi Ethernet hardware options */
static struct xxvenet_option xxvenet_options[] = {
        { /* Turn on FCS stripping on receive packets */
                .opt = XAE_OPTION_FCS_STRIP,
                .reg = XXV_RCW1_OFFSET,
                .m_or = XXV_RCW1_FCS_MASK,
        }, { /* Turn on FCS insertion on transmit packets */
                .opt = XAE_OPTION_FCS_INSERT,
                .reg = XXV_TC_OFFSET,
                .m_or = XXV_TC_FCS_MASK,
        }, { /* Enable transmitter */
                .opt = XAE_OPTION_TXEN,
                .reg = XXV_TC_OFFSET,
                .m_or = XXV_TC_TX_MASK,
        }, { /* Enable receiver */
                .opt = XAE_OPTION_RXEN,
                .reg = XXV_RCW1_OFFSET,
                .m_or = XXV_RCW1_RX_MASK,
        },
        {}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @q:          Pointer to DMA queue structure
 * @reg:        Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_dma_q *q, off_t reg)
{
        return in_be32(q->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @q:          Pointer to DMA queue structure
 * @reg:        Address offset from the base address of the Axi DMA core
 * @value:      Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_dma_q *q,
                                     off_t reg, u32 value)
{
        out_be32((q->dma_regs + reg), value);
}

/**
 * axienet_dma_bdout - Memory mapped Axi DMA register Buffer Descriptor write.
 * @q:          Pointer to DMA queue structure
 * @reg:        Address offset from the base address of the Axi DMA core
 * @value:      Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_bdout(struct axienet_dma_q *q,
                                     off_t reg, dma_addr_t value)
{
#if defined(CONFIG_PHYS_ADDR_T_64BIT)
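        /* When 64-bit physical addressing is in use the descriptor pointer
         * registers are an LSB/MSB pair; a single writeq is assumed here to
         * program both halves at once.
         */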
        writeq(value, (q->dma_regs + reg));
#else
        writel(value, (q->dma_regs + reg));
#endif
}

/**
 * axienet_bd_free - Release buffer descriptor rings for individual dma queue
 * @ndev:       Pointer to the net_device structure
 * @q:          Pointer to DMA queue structure
 *
 * This function is a helper function for axienet_dma_bd_release.
 */
static void __maybe_unused axienet_bd_free(struct net_device *ndev,
                                           struct axienet_dma_q *q)
{
        int i;
        struct axienet_local *lp = netdev_priv(ndev);

        for (i = 0; i < RX_BD_NUM; i++) {
                dma_unmap_single(ndev->dev.parent, q->rx_bd_v[i].phys,
                                 lp->max_frm_size, DMA_FROM_DEVICE);
                dev_kfree_skb((struct sk_buff *)
                              (q->rx_bd_v[i].sw_id_offset));
        }

        if (q->rx_bd_v) {
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(*q->rx_bd_v) * RX_BD_NUM,
                                  q->rx_bd_v,
                                  q->rx_bd_p);
        }
        if (q->tx_bd_v) {
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(*q->tx_bd_v) * TX_BD_NUM,
                                  q->tx_bd_v,
                                  q->tx_bd_p);
        }
}

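/**
 * axienet_mcdma_bd_free - Release MCDMA buffer descriptor rings for
 * individual dma queue
 * @ndev:       Pointer to the net_device structure
 * @q:          Pointer to DMA queue structure
 *
 * This function is a helper function for axienet_dma_bd_release.
 */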
static void __maybe_unused axienet_mcdma_bd_free(struct net_device *ndev,
                                                 struct axienet_dma_q *q)
{
        int i;
        struct axienet_local *lp = netdev_priv(ndev);

        for (i = 0; i < RX_BD_NUM; i++) {
                dma_unmap_single(ndev->dev.parent, q->rxq_bd_v[i].phys,
                                 lp->max_frm_size, DMA_FROM_DEVICE);
                dev_kfree_skb((struct sk_buff *)
                              (q->rxq_bd_v[i].sw_id_offset));
        }

        if (q->rxq_bd_v) {
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(*q->rxq_bd_v) * RX_BD_NUM,
                                  q->rxq_bd_v,
                                  q->rx_bd_p);
        }

        if (q->txq_bd_v) {
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(*q->txq_bd_v) * TX_BD_NUM,
                                  q->txq_bd_v,
                                  q->tx_bd_p);
        }
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:       Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver stop API is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
        int i;
        struct axienet_local *lp = netdev_priv(ndev);

        for_each_dma_queue(lp, i) {
#ifdef CONFIG_AXIENET_HAS_MCDMA
                axienet_mcdma_bd_free(ndev, lp->dq[i]);
#else
                axienet_bd_free(ndev, lp->dq[i]);
#endif
        }
}

/**
 * axienet_mcdma_q_init - Set up buffer descriptor rings for individual Axi DMA
 * @ndev:       Pointer to the net_device structure
 * @q:          Pointer to MCDMA queue structure
 *
 * Return: 0 on success; -ENOMEM on failure
 *
 * This function is a helper function for axienet_mcdma_bd_init.
 */
static int __maybe_unused axienet_mcdma_q_init(struct net_device *ndev,
                                               struct axienet_dma_q *q)
{
        u32 cr, chan_en;
        int i;
        struct sk_buff *skb;
        struct axienet_local *lp = netdev_priv(ndev);

        /* Reset the indexes which are used for accessing the BDs */
        q->tx_bd_ci = 0;
        q->tx_bd_tail = 0;
        q->rx_bd_ci = 0;
        q->rx_offset = XMCDMA_CHAN_RX_OFFSET;

        /* Allocate the Tx and Rx buffer descriptors. */
        q->txq_bd_v = dma_zalloc_coherent(ndev->dev.parent,
                                          sizeof(*q->txq_bd_v) * TX_BD_NUM,
                                          &q->tx_bd_p, GFP_KERNEL);
        if (!q->txq_bd_v)
                goto out;

        q->rxq_bd_v = dma_zalloc_coherent(ndev->dev.parent,
                                          sizeof(*q->rxq_bd_v) * RX_BD_NUM,
                                          &q->rx_bd_p, GFP_KERNEL);
        if (!q->rxq_bd_v)
                goto out;

        if (!q->eth_hasdre) {
                q->tx_bufs = dma_zalloc_coherent(ndev->dev.parent,
                                                  XAE_MAX_PKT_LEN * TX_BD_NUM,
                                                  &q->tx_bufs_dma,
                                                  GFP_KERNEL);
                if (!q->tx_bufs)
                        goto out;

                for (i = 0; i < TX_BD_NUM; i++)
                        q->tx_buf[i] = &q->tx_bufs[i * XAE_MAX_PKT_LEN];
        }

        for (i = 0; i < TX_BD_NUM; i++) {
                q->txq_bd_v[i].next = q->tx_bd_p +
                                      sizeof(*q->txq_bd_v) *
                                      ((i + 1) % TX_BD_NUM);
        }

        for (i = 0; i < RX_BD_NUM; i++) {
                q->rxq_bd_v[i].next = q->rx_bd_p +
                                      sizeof(*q->rxq_bd_v) *
                                      ((i + 1) % RX_BD_NUM);

                skb = netdev_alloc_skb(ndev, lp->max_frm_size);
                if (!skb)
                        goto out;

                /* Ensure that the skb is completely updated
                 * prior to mapping for DMA
                 */
                wmb();

                q->rxq_bd_v[i].sw_id_offset = (phys_addr_t)skb;
                q->rxq_bd_v[i].phys = dma_map_single(ndev->dev.parent,
                                                     skb->data,
                                                     lp->max_frm_size,
                                                     DMA_FROM_DEVICE);
                q->rxq_bd_v[i].cntrl = lp->max_frm_size;
        }

        /* Start updating the Rx channel control register */
        cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
                              q->rx_offset);
        /* Update the interrupt coalesce count */
        cr = ((cr & ~XMCDMA_COALESCE_MASK) |
              ((lp->coalesce_count_rx) << XMCDMA_COALESCE_SHIFT));
        /* Update the delay timer count */
        cr = ((cr & ~XMCDMA_DELAY_MASK) |
              (XAXIDMA_DFT_RX_WAITBOUND << XMCDMA_DELAY_SHIFT));
        /* Enable coalesce, delay timer and error interrupts */
        cr |= XMCDMA_IRQ_ALL_MASK;
        /* Write to the Rx channel control register */
        axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
                          q->rx_offset, cr);

        /* Start updating the Tx channel control register */
        cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
        /* Update the interrupt coalesce count */
        cr = (((cr & ~XMCDMA_COALESCE_MASK)) |
              ((lp->coalesce_count_tx) << XMCDMA_COALESCE_SHIFT));
        /* Update the delay timer count */
        cr = (((cr & ~XMCDMA_DELAY_MASK)) |
              (XAXIDMA_DFT_TX_WAITBOUND << XMCDMA_DELAY_SHIFT));
        /* Enable coalesce, delay timer and error interrupts */
        cr |= XMCDMA_IRQ_ALL_MASK;
        /* Write to the Tx channel control register */
        axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);

        /* Populate the tail pointer and bring the Rx Axi DMA engine out of
         * halted state. This will make the Rx side ready for reception.
         */
        axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id) +
                            q->rx_offset, q->rx_bd_p);
        cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET + q->rx_offset);
        axienet_dma_out32(q, XMCDMA_CR_OFFSET + q->rx_offset,
                          cr | XMCDMA_CR_RUNSTOP_MASK);
        cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
                                q->rx_offset);
        axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) + q->rx_offset,
                          cr | XMCDMA_CR_RUNSTOP_MASK);
        axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
                            q->rx_offset, q->rx_bd_p + (sizeof(*q->rxq_bd_v) *
                            (RX_BD_NUM - 1)));
        chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET + q->rx_offset);
        chan_en |= (1 << (q->chan_id - 1));
        axienet_dma_out32(q, XMCDMA_CHEN_OFFSET + q->rx_offset, chan_en);

        /* Write to the RS (Run-stop) bit in the Tx channel control register.
         * The Tx channel is now ready to run, but it will start transmitting
         * only after the tail pointer register is written.
         */
        axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id),
                          q->tx_bd_p);
        cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET);
        axienet_dma_out32(q, XMCDMA_CR_OFFSET,
                          cr | XMCDMA_CR_RUNSTOP_MASK);
        cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
        axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id),
                          cr | XMCDMA_CR_RUNSTOP_MASK);
        chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET);
        chan_en |= (1 << (q->chan_id - 1));
        axienet_dma_out32(q, XMCDMA_CHEN_OFFSET, chan_en);

        return 0;
out:
        axienet_dma_bd_release(ndev);
        return -ENOMEM;
}

/**
 * axienet_dma_q_init - Set up buffer descriptor rings for individual Axi DMA
 * @ndev:       Pointer to the net_device structure
 * @q:          Pointer to DMA queue structure
 *
 * Return: 0 on success; -ENOMEM on failure
 *
 * This function is a helper function for axienet_dma_bd_init.
 */
static int __maybe_unused axienet_dma_q_init(struct net_device *ndev,
                                             struct axienet_dma_q *q)
{
        u32 cr;
        int i;
        struct sk_buff *skb;
        struct axienet_local *lp = netdev_priv(ndev);

        /* Reset the indexes which are used for accessing the BDs */
        q->tx_bd_ci = 0;
        q->tx_bd_tail = 0;
        q->rx_bd_ci = 0;

        /* Allocate the Tx and Rx buffer descriptors. */
        q->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
                                          sizeof(*q->tx_bd_v) * TX_BD_NUM,
                                          &q->tx_bd_p, GFP_KERNEL);
        if (!q->tx_bd_v)
                goto out;

        q->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
                                          sizeof(*q->rx_bd_v) * RX_BD_NUM,
                                          &q->rx_bd_p, GFP_KERNEL);
        if (!q->rx_bd_v)
                goto out;

        for (i = 0; i < TX_BD_NUM; i++) {
                q->tx_bd_v[i].next = q->tx_bd_p +
                                      sizeof(*q->tx_bd_v) *
                                      ((i + 1) % TX_BD_NUM);
        }

        if (!q->eth_hasdre) {
                q->tx_bufs = dma_zalloc_coherent(ndev->dev.parent,
                                                  XAE_MAX_PKT_LEN * TX_BD_NUM,
                                                  &q->tx_bufs_dma,
                                                  GFP_KERNEL);
                if (!q->tx_bufs)
                        goto out;

                for (i = 0; i < TX_BD_NUM; i++)
                        q->tx_buf[i] = &q->tx_bufs[i * XAE_MAX_PKT_LEN];
        }

        for (i = 0; i < RX_BD_NUM; i++) {
                q->rx_bd_v[i].next = q->rx_bd_p +
                                      sizeof(*q->rx_bd_v) *
                                      ((i + 1) % RX_BD_NUM);

                skb = netdev_alloc_skb(ndev, lp->max_frm_size);
                if (!skb)
                        goto out;

                /* Ensure that the skb is completely updated
                 * prior to mapping for DMA
                 */
                wmb();

                q->rx_bd_v[i].sw_id_offset = (phys_addr_t)skb;
                q->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
                                                     skb->data,
                                                     lp->max_frm_size,
                                                     DMA_FROM_DEVICE);
                q->rx_bd_v[i].cntrl = lp->max_frm_size;
        }

        /* Start updating the Rx channel control register */
        cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
        /* Update the interrupt coalesce count */
        cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
              ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
        /* Update the delay timer count */
        cr = ((cr & ~XAXIDMA_DELAY_MASK) |
              (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
        /* Enable coalesce, delay timer and error interrupts */
        cr |= XAXIDMA_IRQ_ALL_MASK;
        /* Write to the Rx channel control register */
        axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);

        /* Start updating the Tx channel control register */
        cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
        /* Update the interrupt coalesce count */
        cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
              ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
        /* Update the delay timer count */
        cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
              (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
        /* Enable coalesce, delay timer and error interrupts */
        cr |= XAXIDMA_IRQ_ALL_MASK;
        /* Write to the Tx channel control register */
        axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);

        /* Populate the tail pointer and bring the Rx Axi DMA engine out of
         * halted state. This will make the Rx side ready for reception.
         */
        axienet_dma_bdout(q, XAXIDMA_RX_CDESC_OFFSET, q->rx_bd_p);
        cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
        axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
                          cr | XAXIDMA_CR_RUNSTOP_MASK);
        axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, q->rx_bd_p +
                          (sizeof(*q->rx_bd_v) * (RX_BD_NUM - 1)));

        /* Write to the RS (Run-stop) bit in the Tx channel control register.
         * The Tx channel is now ready to run, but it will start transmitting
         * only after the tail pointer register is written.
         */
        axienet_dma_bdout(q, XAXIDMA_TX_CDESC_OFFSET, q->tx_bd_p);
        cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
        axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
                          cr | XAXIDMA_CR_RUNSTOP_MASK);

        return 0;
out:
        axienet_dma_bd_release(ndev);
        return -ENOMEM;
}

/**
 * axienet_dma_bd_init - Set up buffer descriptor rings for Axi DMA
 * @ndev:       Pointer to the net_device structure
 *
 * Return: 0 on success; -ENOMEM on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. It initializes the descriptors with the required default values
 * and is called when the Axi Ethernet driver is reset.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
        int i, ret = 0;
        struct axienet_local *lp = netdev_priv(ndev);

        for_each_dma_queue(lp, i) {
#ifdef CONFIG_AXIENET_HAS_MCDMA
                ret = axienet_mcdma_q_init(ndev, lp->dq[i]);
#else
                ret = axienet_dma_q_init(ndev, lp->dq[i]);
#endif
                if (ret != 0)
                        break;
        }

        return ret;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:       Pointer to the net_device structure
 * @address:    6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
        struct axienet_local *lp = netdev_priv(ndev);

        if (address)
                ether_addr_copy(ndev->dev_addr, address);
        if (!is_valid_ether_addr(ndev->dev_addr))
                eth_random_addr(ndev->dev_addr);

        if (lp->axienet_config->mactype != XAXIENET_1G &&
            lp->axienet_config->mactype != XAXIENET_2_5G)
                return;

        /* Set up the unicast MAC address filter with the given MAC address */
        axienet_iow(lp, XAE_UAW0_OFFSET,
                    (ndev->dev_addr[0]) |
                    (ndev->dev_addr[1] << 8) |
                    (ndev->dev_addr[2] << 16) |
                    (ndev->dev_addr[3] << 24));
        axienet_iow(lp, XAE_UAW1_OFFSET,
                    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
                      ~XAE_UAW1_UNICASTADDR_MASK) |
                     (ndev->dev_addr[4] |
                     (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:       Pointer to the net_device structure
 * @p:          6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;

        axienet_set_mac_address(ndev, addr->sa_data);
        return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:       Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
        int i;
        u32 reg, af0reg, af1reg;
        struct axienet_local *lp = netdev_priv(ndev);

        if ((lp->axienet_config->mactype != XAXIENET_1G) || lp->eth_hasnobuf)
                return;

        if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
            netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
                /* We must make the kernel realize we had to move into
                 * promiscuous mode. If it was a promiscuous mode request
                 * the flag is already set. If not we set it.
                 */
                ndev->flags |= IFF_PROMISC;
                reg = axienet_ior(lp, XAE_FMI_OFFSET);
                reg |= XAE_FMI_PM_MASK;
                axienet_iow(lp, XAE_FMI_OFFSET, reg);
                dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
        } else if (!netdev_mc_empty(ndev)) {
                struct netdev_hw_addr *ha;

                i = 0;
                netdev_for_each_mc_addr(ha, ndev) {
                        if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
                                break;

                        af0reg = (ha->addr[0]);
                        af0reg |= (ha->addr[1] << 8);
                        af0reg |= (ha->addr[2] << 16);
                        af0reg |= (ha->addr[3] << 24);

                        af1reg = (ha->addr[4]);
                        af1reg |= (ha->addr[5] << 8);

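                        /* The low byte of the FMI register selects which of
                         * the four multicast CAM entries the AF0/AF1 writes
                         * below will program.
                         */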
                        reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
                        reg |= i;

                        axienet_iow(lp, XAE_FMI_OFFSET, reg);
                        axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
                        axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
                        i++;
                }
        } else {
                reg = axienet_ior(lp, XAE_FMI_OFFSET);
                reg &= ~XAE_FMI_PM_MASK;

                axienet_iow(lp, XAE_FMI_OFFSET, reg);

                for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
                        reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
                        reg |= i;

                        axienet_iow(lp, XAE_FMI_OFFSET, reg);
                        axienet_iow(lp, XAE_AF0_OFFSET, 0);
                        axienet_iow(lp, XAE_AF1_OFFSET, 0);
                }

                dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
        }
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:       Pointer to the net_device structure
 * @options:    Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. Typical options are the jumbo frame option, basic VLAN option,
 * promiscuous mode option etc. This function is used to set or clear these
 * options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
        int reg;
        struct axienet_local *lp = netdev_priv(ndev);
        struct axienet_option *tp = &axienet_options[0];

        while (tp->opt) {
                reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
                if (options & tp->opt)
                        reg |= tp->m_or;
                axienet_iow(lp, tp->reg, reg);
                tp++;
        }

        lp->options |= options;
}

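/**
 * xxvenet_setoptions - Set an XXV Ethernet option
 * @ndev:       Pointer to the net_device structure
 * @options:    Option to be enabled/disabled
 *
 * Like axienet_setoptions, but walks the xxvenet_option table used by the
 * 10G/25G MAC.
 */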
static void xxvenet_setoptions(struct net_device *ndev, u32 options)
{
        int reg;
        struct axienet_local *lp = netdev_priv(ndev);
        struct xxvenet_option *tp = &xxvenet_options[0];

        while (tp->opt) {
                reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
                if (options & tp->opt)
                        reg |= tp->m_or;
                axienet_iow(lp, tp->reg, reg);
                tp++;
        }

        lp->options |= options;
}

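/**
 * __axienet_device_reset - Reset an Axi DMA Tx or Rx channel
 * @q:          Pointer to DMA queue structure
 * @offset:     Offset of the channel control register to reset through
 *
 * Requests a reset via the given channel control register and busy-waits,
 * for up to about a millisecond, for the reset bit to clear.
 */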
static void __axienet_device_reset(struct axienet_dma_q *q, off_t offset)
{
        u32 timeout;
        /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
         * process of Axi DMA takes a while to complete as all pending
         * commands/transfers will be flushed or completed during this
         * reset process.
         */
        axienet_dma_out32(q, offset, XAXIDMA_CR_RESET_MASK);
        timeout = DELAY_OF_ONE_MILLISEC;
        while (axienet_dma_in32(q, offset) & XAXIDMA_CR_RESET_MASK) {
                udelay(1);
                if (--timeout == 0) {
                        netdev_err(q->lp->ndev, "%s: DMA reset timeout!\n",
                                   __func__);
                        break;
                }
        }
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:       Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since the Axi DMA reset
 * lines are connected to the Axi Ethernet reset lines, this in turn resets
 * the Axi Ethernet core. No separate hardware reset is done for the Axi
 * Ethernet core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
        u32 axienet_status;
        struct axienet_local *lp = netdev_priv(ndev);
        u32 err, val;
        struct axienet_dma_q *q;
        u32 i;

        if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
                for_each_dma_queue(lp, i) {
                        q = lp->dq[i];
                        __axienet_device_reset(q, XAXIDMA_TX_CR_OFFSET);
#ifndef CONFIG_AXIENET_HAS_MCDMA
                        __axienet_device_reset(q, XAXIDMA_RX_CR_OFFSET);
#endif
                }
        }

        lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
        if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
                lp->options |= XAE_OPTION_VLAN;
                lp->options &= (~XAE_OPTION_JUMBO);
        }

        if ((ndev->mtu > XAE_MTU) && (ndev->mtu <= XAE_JUMBO_MTU)) {
                lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
                                        XAE_TRL_SIZE;
                if (lp->max_frm_size <= lp->rxmem &&
                    (lp->axienet_config->mactype != XAXIENET_10G_25G))
                        lp->options |= XAE_OPTION_JUMBO;
        }

        if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
                if (axienet_dma_bd_init(ndev)) {
                        netdev_err(ndev, "%s: descriptor allocation failed\n",
                                   __func__);
                }
        }

        if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
                axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
                axienet_status &= ~XAE_RCW1_RX_MASK;
                axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
        }

        if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
                /* Check whether the block lock bit got set; this tells us
                 * whether the 10G Ethernet IP is functioning normally.
                 */
                err = readl_poll_timeout(lp->regs + XXV_STATRX_BLKLCK_OFFSET,
                                         val, (val & XXV_RX_BLKLCK_MASK),
                                         10, DELAY_OF_ONE_MILLISEC);
                if (err) {
                        netdev_err(ndev, "%s: Block lock bit of XXV MAC didn't get set; cross-check the ref clock configuration for the MAC\n",
                                   __func__);
                }
#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
                if (!lp->is_tsn) {
                        axienet_rxts_iow(lp, XAXIFIFO_TXTS_RDFR,
                                         XAXIFIFO_TXTS_RESET_MASK);
                        axienet_rxts_iow(lp, XAXIFIFO_TXTS_SRR,
                                         XAXIFIFO_TXTS_RESET_MASK);
                }
#endif
        }

        if ((lp->axienet_config->mactype == XAXIENET_1G) &&
            !lp->eth_hasnobuf) {
                axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
                if (axienet_status & XAE_INT_RXRJECT_MASK)
                        axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

                /* Enable Receive errors */
                axienet_iow(lp, XAE_IE_OFFSET, XAE_INT_RECV_ERROR_MASK);
        }

        if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
                lp->options |= XAE_OPTION_FCS_STRIP;
                lp->options |= XAE_OPTION_FCS_INSERT;
        } else {
                axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
        }
        lp->axienet_config->setoptions(ndev, lp->options &
                                       ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

        axienet_set_mac_address(ndev, NULL);
        axienet_set_multicast_list(ndev);
        lp->axienet_config->setoptions(ndev, lp->options);

        netif_trans_update(ndev);
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev:       Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
        u32 emmc_reg;
        u32 link_state;
        u32 setspeed = 1;
        struct axienet_local *lp = netdev_priv(ndev);
        struct phy_device *phy = ndev->phydev;

        link_state = phy->speed | (phy->duplex << 1) | phy->link;
        if (lp->last_link != link_state) {
                if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
                        if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
                                setspeed = 0;
                } else {
                        if ((phy->speed == SPEED_1000) &&
                            (lp->phy_type == XAE_PHY_TYPE_MII))
                                setspeed = 0;
                }

                if (setspeed == 1) {
                        emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
                        emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

                        switch (phy->speed) {
                        case SPEED_2500:
                                emmc_reg |= XAE_EMMC_LINKSPD_2500;
                                break;
                        case SPEED_1000:
                                emmc_reg |= XAE_EMMC_LINKSPD_1000;
                                break;
                        case SPEED_100:
                                emmc_reg |= XAE_EMMC_LINKSPD_100;
                                break;
                        case SPEED_10:
                                emmc_reg |= XAE_EMMC_LINKSPD_10;
                                break;
                        default:
                                dev_err(&ndev->dev, "Speed other than 10, 100, 1000 or 2500 Mbps is not supported\n");
                                break;
                        }

                        axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
                        phy_print_status(phy);
                } else {
                        netdev_err(ndev,
                                   "Error setting Axi Ethernet mac speed\n");
                }

                lp->last_link = link_state;
        }
}

#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
/**
 * axienet_tx_hwtstamp - Read tx timestamp from hw and attach it to the skbuff
 * @lp:         Pointer to axienet local structure
 * @cur_p:      Pointer to the axi_dma/axi_mcdma current bd
 *
 * Return:      None.
 */
#ifdef CONFIG_AXIENET_HAS_MCDMA
static void axienet_tx_hwtstamp(struct axienet_local *lp,
                                struct aximcdma_bd *cur_p)
#else
static void axienet_tx_hwtstamp(struct axienet_local *lp,
                                struct axidma_bd *cur_p)
#endif
{
        u32 sec = 0, nsec = 0, val;
        u64 time64;
        int err = 0;
        u32 count, len = lp->axienet_config->tx_ptplen;
        struct skb_shared_hwtstamps *shhwtstamps =
                skb_hwtstamps((struct sk_buff *)cur_p->ptp_tx_skb);

        val = axienet_txts_ior(lp, XAXIFIFO_TXTS_ISR);
        if (unlikely(!(val & XAXIFIFO_TXTS_INT_RC_MASK)))
                dev_info(lp->dev, "Didn't get FIFO rx interrupt %d\n", val);

        /* If the FIFO is configured in cut-through mode we will get an Rx
         * complete interrupt even when only one byte is in the FIFO, so wait
         * for the full packet.
         */
        err = readl_poll_timeout_atomic(lp->tx_ts_regs + XAXIFIFO_TXTS_RLR, val,
                                        ((val & XAXIFIFO_TXTS_RXFD_MASK) >=
                                        len), 0, 1000000);
        if (err)
                netdev_err(lp->ndev, "%s: Didn't get the full timestamp packet",
                           __func__);

        nsec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
        sec  = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
        val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
        val = ((val & XAXIFIFO_TXTS_TAG_MASK) >> XAXIFIFO_TXTS_TAG_SHIFT);
        if (val != cur_p->ptp_tx_ts_tag) {
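                /* Tag mismatch: drain older queued timestamps from the FIFO
                 * until the entry matching this descriptor's tag is found.
                 */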
                count = axienet_txts_ior(lp, XAXIFIFO_TXTS_RFO);
                while (count) {
                        nsec = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
                        sec  = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
                        val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);
                        val = ((val & XAXIFIFO_TXTS_TAG_MASK) >>
                                XAXIFIFO_TXTS_TAG_SHIFT);
                        if (val == cur_p->ptp_tx_ts_tag)
                                break;
                        count = axienet_txts_ior(lp, XAXIFIFO_TXTS_RFO);
                }
                if (val != cur_p->ptp_tx_ts_tag) {
                        dev_info(lp->dev, "Mismatching 2-step tag. Got %x, expected %x\n",
                                 val, cur_p->ptp_tx_ts_tag);
                }
        }

        if (lp->axienet_config->mactype != XAXIENET_10G_25G)
                val = axienet_txts_ior(lp, XAXIFIFO_TXTS_RXFD);

        time64 = sec * NS_PER_SEC + nsec;
        memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
        shhwtstamps->hwtstamp = ns_to_ktime(time64);
        if (lp->axienet_config->mactype != XAXIENET_10G_25G)
                skb_pull((struct sk_buff *)cur_p->ptp_tx_skb,
                         AXIENET_TS_HEADER_LEN);

        skb_tstamp_tx((struct sk_buff *)cur_p->ptp_tx_skb, shhwtstamps);
        dev_kfree_skb_any((struct sk_buff *)cur_p->ptp_tx_skb);
        cur_p->ptp_tx_skb = 0;
}

/**
 * axienet_rx_hwtstamp - Read rx timestamp from hw and attach it to the skbuff
 * @lp:         Pointer to axienet local structure
 * @skb:        Pointer to the sk_buff structure
 *
 * Return:      None.
 */
static void axienet_rx_hwtstamp(struct axienet_local *lp,
                                struct sk_buff *skb)
{
        u32 sec = 0, nsec = 0, val;
        u64 time64;
        int err = 0;
        struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

        val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_ISR);
        if (unlikely(!(val & XAXIFIFO_TXTS_INT_RC_MASK))) {
                dev_info(lp->dev, "Didn't get FIFO rx interrupt %d\n", val);
                return;
        }

        val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RFO);
        if (!val)
                return;

        /* If the FIFO is configured in cut-through mode we will get an Rx
         * complete interrupt even when only one byte is in the FIFO, so wait
         * for the full packet.
         */
        err = readl_poll_timeout_atomic(lp->rx_ts_regs + XAXIFIFO_TXTS_RLR, val,
                                        ((val & XAXIFIFO_TXTS_RXFD_MASK) >= 12),
                                        0, 1000000);
        if (err) {
                netdev_err(lp->ndev, "%s: Didn't get the full timestamp packet",
                           __func__);
                return;
        }

        nsec = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
        sec  = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);
        val = axienet_rxts_ior(lp, XAXIFIFO_TXTS_RXFD);

        if (lp->tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
                time64 = sec * NS_PER_SEC + nsec;
                shhwtstamps->hwtstamp = ns_to_ktime(time64);
        }
}
#endif

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev:       Pointer to the net_device structure
 * @q:          Pointer to DMA queue structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev,
                                    struct axienet_dma_q *q)
{
        u32 size = 0;
        u32 packets = 0;
#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
        struct axienet_local *lp = netdev_priv(ndev);
#endif
#ifdef CONFIG_AXIENET_HAS_MCDMA
        struct aximcdma_bd *cur_p;
#else
        struct axidma_bd *cur_p;
#endif
        unsigned int status = 0;

#ifdef CONFIG_AXIENET_HAS_MCDMA
        cur_p = &q->txq_bd_v[q->tx_bd_ci];
        status = cur_p->sband_stats;
#else
        cur_p = &q->tx_bd_v[q->tx_bd_ci];
        status = cur_p->status;
#endif
        while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
                if (cur_p->ptp_tx_skb)
                        axienet_tx_hwtstamp(lp, cur_p);
#endif
                if (cur_p->tx_desc_mapping == DESC_DMA_MAP_PAGE)
                        dma_unmap_page(ndev->dev.parent, cur_p->phys,
                                       cur_p->cntrl &
                                       XAXIDMA_BD_CTRL_LENGTH_MASK,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(ndev->dev.parent, cur_p->phys,
                                         cur_p->cntrl &
                                         XAXIDMA_BD_CTRL_LENGTH_MASK,
                                         DMA_TO_DEVICE);
                if (cur_p->tx_skb)
                        dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
                /*cur_p->phys = 0;*/
                cur_p->app0 = 0;
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app4 = 0;
                cur_p->status = 0;
                cur_p->tx_skb = 0;
#ifdef CONFIG_AXIENET_HAS_MCDMA
                cur_p->sband_stats = 0;
#endif

                size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
                packets++;

                ++q->tx_bd_ci;
                q->tx_bd_ci %= TX_BD_NUM;
#ifdef CONFIG_AXIENET_HAS_MCDMA
                cur_p = &q->txq_bd_v[q->tx_bd_ci];
                status = cur_p->sband_stats;
#else
                cur_p = &q->tx_bd_v[q->tx_bd_ci];
                status = cur_p->status;
#endif
        }

        ndev->stats.tx_packets += packets;
        ndev->stats.tx_bytes += size;
        /* Fixme: With the existing multiqueue implementation
         * in the driver it is difficult to get the exact queue info.
         * We should wake only the particular queue
         * instead of waking all ndev queues.
         */
        netif_tx_wake_all_queues(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @q:          Pointer to DMA queue structure
 * @num_frag:   The number of BDs to check for
 *
 * Return: 0 on success; NETDEV_TX_BUSY if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_dma_q *q,
                                            int num_frag)
{
#ifdef CONFIG_AXIENET_HAS_MCDMA
        struct aximcdma_bd *cur_p;

        cur_p = &q->txq_bd_v[(q->tx_bd_tail + num_frag) % TX_BD_NUM];
        if (cur_p->sband_stats & XMCDMA_BD_STS_ALL_MASK)
                return NETDEV_TX_BUSY;
#else
        struct axidma_bd *cur_p;

        cur_p = &q->tx_bd_v[(q->tx_bd_tail + num_frag) % TX_BD_NUM];
        if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
                return NETDEV_TX_BUSY;
#endif
        return 0;
}

#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
/**
 * axienet_create_tsheader - Create timestamp header for tx
 * @buf:        Pointer to the buf to copy timestamp header
 * @msg_type:   PTP message type
 * @q:          Pointer to DMA queue structure
 *
 * Return:      None.
 */
static void axienet_create_tsheader(u8 *buf, u8 msg_type,
                                    struct axienet_dma_q *q)
{
        struct axienet_local *lp = q->lp;
#ifdef CONFIG_AXIENET_HAS_MCDMA
        struct aximcdma_bd *cur_p;
#else
        struct axidma_bd *cur_p;
#endif
        u64 val;
        u32 tmp;

#ifdef CONFIG_AXIENET_HAS_MCDMA
        cur_p = &q->txq_bd_v[q->tx_bd_tail];
#else
        cur_p = &q->tx_bd_v[q->tx_bd_tail];
#endif

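        /* One-step: the hardware inserts the timestamp (and updates the UDP
         * checksum) in-line; two-step: tag the frame so the raised timestamp
         * can later be matched in axienet_tx_hwtstamp().
         */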
        if (msg_type == TX_TS_OP_ONESTEP) {
                buf[0] = TX_TS_OP_ONESTEP;
                buf[1] = TX_TS_CSUM_UPDATE;
                buf[4] = TX_PTP_TS_OFFSET;
                buf[6] = TX_PTP_CSUM_OFFSET;
        } else {
                buf[0] = TX_TS_OP_TWOSTEP;
                buf[2] = cur_p->ptp_tx_ts_tag & 0xFF;
                buf[3] = (cur_p->ptp_tx_ts_tag >> 8) & 0xFF;
        }

        if (lp->axienet_config->mactype == XAXIENET_1G ||
            lp->axienet_config->mactype == XAXIENET_2_5G) {
                memcpy(&val, buf, AXIENET_TS_HEADER_LEN);
                swab64s(&val);
                memcpy(buf, &val, AXIENET_TS_HEADER_LEN);
        } else if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
                memcpy(&tmp, buf, XXVENET_TS_HEADER_LEN);
                axienet_txts_iow(lp, XAXIFIFO_TXTS_TXFD, tmp);
                axienet_txts_iow(lp, XAXIFIFO_TXTS_TLR, XXVENET_TS_HEADER_LEN);
        }
}
#endif

#ifdef CONFIG_XILINX_TSN
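/* Select the TSN transmit queue from the frame's VLAN PCP bits: PCP 4 maps
 * to the scheduled traffic (ST) queue, PCP 2 and 3 to the reserved (RE)
 * queue, and everything else to best effort (BE).
 */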
static inline u16 tsn_queue_mapping(const struct sk_buff *skb)
{
        int queue = XAE_BE;
        u16 vlan_tci;
        u8 pcp;

        struct ethhdr *hdr = (struct ethhdr *)skb->data;
        u16 ether_type = ntohs(hdr->h_proto);

        if (unlikely(ether_type == ETH_P_8021Q)) {
                struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)skb->data;

                /* ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); */

                vlan_tci = ntohs(vhdr->h_vlan_TCI);

                pcp = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                pr_debug("vlan_tci: %x\n", vlan_tci);
                pr_debug("pcp: %d\n", pcp);

                if (pcp == 4)
                        queue = XAE_ST;
                else if (pcp == 2 || pcp == 3)
                        queue = XAE_RE;
        }
        pr_debug("selected queue: %d\n", queue);
        return queue;
}
#endif

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:        sk_buff pointer that contains data to be Txed.
 * @ndev:       Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK on success; NETDEV_TX_BUSY if any of the descriptors
 * are not free.
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        u32 ii;
        u32 num_frag;
        u32 csum_start_off;
        u32 csum_index_off;
        dma_addr_t tail_p;
        struct axienet_local *lp = netdev_priv(ndev);
#ifdef CONFIG_AXIENET_HAS_MCDMA
        struct aximcdma_bd *cur_p;
#else
        struct axidma_bd *cur_p;
#endif
        unsigned long flags;
        u32 pad = 0;
        struct axienet_dma_q *q;
        u16 map = skb_get_queue_mapping(skb); /* Single dma queue default */
1369#ifdef CONFIG_XILINX_TSN
1370        if (lp->is_tsn) {
1371#ifdef CONFIG_XILINX_TSN_PTP
1372                const struct ethhdr *eth = (struct ethhdr *)skb->data;
1373
1374                /* check whether skb is a PTP frame */
1375                if (eth->h_proto == htons(ETH_P_1588))
1376                        return axienet_ptp_xmit(skb, ndev);
1377#endif
1378                map = tsn_queue_mapping(skb);
1379
1380                if (lp->temac_no == XAE_TEMAC2) {
1381                        dev_kfree_skb_any(skb);
1382                        return NETDEV_TX_OK;
1383                }
1384        }
1385#endif
1386        num_frag = skb_shinfo(skb)->nr_frags;
1387
1388        q = lp->dq[map];
1389
1390#ifdef CONFIG_AXIENET_HAS_MCDMA
1391        cur_p = &q->txq_bd_v[q->tx_bd_tail];
1392#else
1393        cur_p = &q->tx_bd_v[q->tx_bd_tail];
1394#endif
1395
1396        spin_lock_irqsave(&q->tx_lock, flags);
1397        if (axienet_check_tx_bd_space(q, num_frag)) {
1398                if (!__netif_subqueue_stopped(ndev, map))
1399                        netif_stop_subqueue(ndev, map);
1400                spin_unlock_irqrestore(&q->tx_lock, flags);
1401                return NETDEV_TX_BUSY;
1402        }
1403
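        /* For MACs with in-band 1588 support, an 8-byte timestamp command
         * header is pushed in front of the frame; the 10G/25G MAC instead
         * takes a 4-byte command through a dedicated Tx timestamp FIFO.
         */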
1404#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
1405        if (!lp->is_tsn) {
1406                if ((((lp->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_SYNC) ||
1407                      (lp->tstamp_config.tx_type == HWTSTAMP_TX_ON)) ||
1408                       lp->eth_hasptp) && (lp->axienet_config->mactype !=
1409                       XAXIENET_10G_25G)) {
1410                        u8 *tmp;
1411                        struct sk_buff *new_skb;
1412
1413                        if (skb_headroom(skb) < AXIENET_TS_HEADER_LEN) {
1414                                new_skb = skb_realloc_headroom(skb,
1415                                                               AXIENET_TS_HEADER_LEN);
1416                                if (!new_skb) {
1417                                        dev_err(&ndev->dev, "failed to allocate new socket buffer\n");
1418                                        dev_kfree_skb_any(skb);
1419                                        spin_unlock_irqrestore(&q->tx_lock,
1420                                                               flags);
1421                                        return NETDEV_TX_OK;
1422                                }
1423
1424                                /*  Transfer the ownership to the
1425                                 *  new socket buffer if required
1426                                 */
1427                                if (skb->sk)
1428                                        skb_set_owner_w(new_skb, skb->sk);
1429                                dev_kfree_skb(skb);
1430                                skb = new_skb;
1431                        }
1432
1433                        tmp = skb_push(skb, AXIENET_TS_HEADER_LEN);
1434                        memset(tmp, 0, AXIENET_TS_HEADER_LEN);
1435                        cur_p->ptp_tx_ts_tag++;
1436
1437                        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1438                                if (lp->tstamp_config.tx_type ==
1439                                        HWTSTAMP_TX_ONESTEP_SYNC) {
1440                                        axienet_create_tsheader(tmp,
1441                                                                TX_TS_OP_ONESTEP
1442                                                                , q);
1443                                } else {
1444                                        axienet_create_tsheader(tmp,
1445                                                                TX_TS_OP_TWOSTEP
1446                                                                , q);
1447                                        skb_shinfo(skb)->tx_flags
1448                                                        |= SKBTX_IN_PROGRESS;
1449                                        cur_p->ptp_tx_skb =
1450                                                (unsigned long)skb_get(skb);
1451                                }
1452                        }
1453                } else if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1454                           (lp->axienet_config->mactype == XAXIENET_10G_25G)) {
1455                        cur_p->ptp_tx_ts_tag = (prandom_u32() &
1456                                                ~XAXIFIFO_TXTS_TAG_MASK) + 1;
1457                        if (lp->tstamp_config.tx_type ==
1458                                                HWTSTAMP_TX_ONESTEP_SYNC) {
1459                                axienet_create_tsheader(lp->tx_ptpheader,
1460                                                        TX_TS_OP_ONESTEP, q);
1461                        } else {
1462                                axienet_create_tsheader(lp->tx_ptpheader,
1463                                                        TX_TS_OP_TWOSTEP, q);
1464                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1465                                cur_p->ptp_tx_skb = (phys_addr_t)skb_get(skb);
1466                        }
1467                }
1468        }
1469#endif
1470        /* Workaround for the XXV MAC: the MAC drops packets shorter than
1471         * 64 bytes, so append padding to bring the packet length up to at
1472         * least 64 bytes.
1473         */
1474        if (skb->len < XXV_MAC_MIN_PKT_LEN &&
1475            (lp->axienet_config->mactype == XAXIENET_10G_25G))
1476                pad = XXV_MAC_MIN_PKT_LEN - skb->len;
1477
1478        if (skb->ip_summed == CHECKSUM_PARTIAL && !lp->eth_hasnobuf &&
1479            (lp->axienet_config->mactype == XAXIENET_1G)) {
1480                if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1481                        /* Tx Full Checksum Offload Enabled */
1482                        cur_p->app0 |= 2;
1483                } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1484                        csum_start_off = skb_transport_offset(skb);
1485                        csum_index_off = csum_start_off + skb->csum_offset;
1486                        /* Tx Partial Checksum Offload Enabled */
1487                        cur_p->app0 |= 1;
1488                        cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1489                }
1490        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY &&
1491                   !lp->eth_hasnobuf &&
1492                   (lp->axienet_config->mactype == XAXIENET_1G)) {
1493                cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1494        }
1495
1496#ifdef CONFIG_AXIENET_HAS_MCDMA
1497        cur_p->cntrl = (skb_headlen(skb) | XMCDMA_BD_CTRL_TXSOF_MASK) + pad;
1498#else
1499        cur_p->cntrl = (skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK) + pad;
1500#endif
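        /* Without a Data Realignment Engine (DRE) the DMA cannot handle
         * unaligned or scattered buffers, so copy such frames into a
         * pre-allocated, aligned bounce buffer instead of mapping the
         * skb directly.
         */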
1501        if (!q->eth_hasdre &&
1502            (((phys_addr_t)skb->data & 0x3) || (num_frag > 0))) {
1503                skb_copy_and_csum_dev(skb, q->tx_buf[q->tx_bd_tail]);
1504
1505                cur_p->phys = q->tx_bufs_dma +
1506                              (q->tx_buf[q->tx_bd_tail] - q->tx_bufs);
1507
1508                if (num_frag > 0) {
1509                        pad = skb_pagelen(skb) - skb_headlen(skb);
1510#ifdef CONFIG_AXIENET_HAS_MCDMA
1511                        cur_p->cntrl = (skb_headlen(skb) |
1512                                        XMCDMA_BD_CTRL_TXSOF_MASK) + pad;
1513#else
1514                        cur_p->cntrl = (skb_headlen(skb) |
1515                                        XAXIDMA_BD_CTRL_TXSOF_MASK) + pad;
1516#endif
1517                }
1518                goto out;
1519        } else {
1520                cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
1521                                             skb_headlen(skb), DMA_TO_DEVICE);
1522        }
1523        cur_p->tx_desc_mapping = DESC_DMA_MAP_SINGLE;
1524
1525        for (ii = 0; ii < num_frag; ii++) {
1526                u32 len;
1527                skb_frag_t *frag;
1528
1529                ++q->tx_bd_tail;
1530                q->tx_bd_tail %= TX_BD_NUM;
1531#ifdef CONFIG_AXIENET_HAS_MCDMA
1532                cur_p = &q->txq_bd_v[q->tx_bd_tail];
1533#else
1534                cur_p = &q->tx_bd_v[q->tx_bd_tail];
1535#endif
1536                frag = &skb_shinfo(skb)->frags[ii];
1537                len = skb_frag_size(frag);
1538                cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, len,
1539                                               DMA_TO_DEVICE);
1540                cur_p->cntrl = len + pad;
1541                cur_p->tx_desc_mapping = DESC_DMA_MAP_PAGE;
1542        }
1543
1544out:
1545#ifdef CONFIG_AXIENET_HAS_MCDMA
1546        cur_p->cntrl |= XMCDMA_BD_CTRL_TXEOF_MASK;
1547        tail_p = q->tx_bd_p + sizeof(*q->txq_bd_v) * q->tx_bd_tail;
1548#else
1549        cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1550        tail_p = q->tx_bd_p + sizeof(*q->tx_bd_v) * q->tx_bd_tail;
1551#endif
1552        cur_p->tx_skb = (phys_addr_t)skb;
1553
1554        /* Ensure BD write before starting transfer */
1555        wmb();
1556
1557        /* Start the transfer */
1558#ifdef CONFIG_AXIENET_HAS_MCDMA
1559        axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id),
1560                          tail_p);
1561#else
1562        axienet_dma_bdout(q, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1563#endif
1564        ++q->tx_bd_tail;
1565        q->tx_bd_tail %= TX_BD_NUM;
1566
1567        spin_unlock_irqrestore(&q->tx_lock, flags);
1568
1569        return NETDEV_TX_OK;
1570}
1571
1572/**
1573 * axienet_recv - Called from the Axi DMA Rx ISR to complete received
1574 *                BD processing.
1575 * @ndev:       Pointer to net_device structure.
1576 * @budget:     NAPI budget
1577 * @q:          Pointer to axienet DMA queue structure
1578 *
1579 * This function is invoked from the Axi DMA Rx ISR (NAPI poll) to process
1580 * the Rx BDs. It does minimal processing and invokes "netif_receive_skb"
1581 * to complete further processing.
1582 * Return: Number of BDs processed.
1583 */
1584static int axienet_recv(struct net_device *ndev, int budget,
1585                        struct axienet_dma_q *q)
1586{
1587        u32 length;
1588        u32 csumstatus;
1589        u32 size = 0;
1590        u32 packets = 0;
1591        dma_addr_t tail_p = 0;
1592        struct axienet_local *lp = netdev_priv(ndev);
1593        struct sk_buff *skb, *new_skb;
1594#ifdef CONFIG_AXIENET_HAS_MCDMA
1595        struct aximcdma_bd *cur_p;
1596#else
1597        struct axidma_bd *cur_p;
1598#endif
1599        unsigned int numbdfree = 0;
1600
1601        /* Get relevant BD status value */
1602        rmb();
1603#ifdef CONFIG_AXIENET_HAS_MCDMA
1604        cur_p = &q->rxq_bd_v[q->rx_bd_ci];
1605#else
1606        cur_p = &q->rx_bd_v[q->rx_bd_ci];
1607#endif
1608
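        /* Walk the completed BDs up to the NAPI budget: hand each skb to
         * the stack, then re-arm the BD with a freshly allocated skb.
         */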
1609        while ((numbdfree < budget) &&
1610               (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1611#ifdef CONFIG_AXIENET_HAS_MCDMA
1612                tail_p = q->rx_bd_p + sizeof(*q->rxq_bd_v) * q->rx_bd_ci;
1613#else
1614                tail_p = q->rx_bd_p + sizeof(*q->rx_bd_v) * q->rx_bd_ci;
1615#endif
1616                skb = (struct sk_buff *)(cur_p->sw_id_offset);
1617
1618                if (lp->eth_hasnobuf ||
1619                    (lp->axienet_config->mactype != XAXIENET_1G))
1620                        length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
1621                else
1622                        length = cur_p->app4 & 0x0000FFFF;
1623
1624                dma_unmap_single(ndev->dev.parent, cur_p->phys,
1625                                 lp->max_frm_size,
1626                                 DMA_FROM_DEVICE);
1627
1628                skb_put(skb, length);
1629#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
1630        if (!lp->is_tsn) {
1631                if ((lp->tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL ||
1632                     lp->eth_hasptp) && (lp->axienet_config->mactype !=
1633                     XAXIENET_10G_25G)) {
1634                        u32 sec, nsec;
1635                        u64 time64;
1636                        struct skb_shared_hwtstamps *shhwtstamps;
1637
1638                        if (lp->axienet_config->mactype == XAXIENET_1G ||
1639                            lp->axienet_config->mactype == XAXIENET_2_5G) {
1640                                /* The first 8 bytes will be the timestamp */
1641                                memcpy(&sec, &skb->data[0], 4);
1642                                memcpy(&nsec, &skb->data[4], 4);
1643
1644                                sec = cpu_to_be32(sec);
1645                                nsec = cpu_to_be32(nsec);
1646                        } else {
1647                                /* The first 8 bytes will be the timestamp */
1648                                memcpy(&nsec, &skb->data[0], 4);
1649                                memcpy(&sec, &skb->data[4], 4);
1650                        }
1651
1652                        /* Remove these 8 bytes from the buffer */
1653                        skb_pull(skb, 8);
1654                        time64 = sec * NS_PER_SEC + nsec;
1655                        shhwtstamps = skb_hwtstamps(skb);
1656                        shhwtstamps->hwtstamp = ns_to_ktime(time64);
1657                } else if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
1658                        axienet_rx_hwtstamp(lp, skb);
1659                }
1660        }
1661#endif
1662                skb->protocol = eth_type_trans(skb, ndev);
1663                /*skb_checksum_none_assert(skb);*/
1664                skb->ip_summed = CHECKSUM_NONE;
1665
1666                /* if we're doing Rx csum offload, set it up */
1667                if (lp->features & XAE_FEATURE_FULL_RX_CSUM &&
1668                    (lp->axienet_config->mactype == XAXIENET_1G) &&
1669                    !lp->eth_hasnobuf) {
1670                        csumstatus = (cur_p->app2 &
1671                                      XAE_FULL_CSUM_STATUS_MASK) >> 3;
1672                        if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
1673                            (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
1674                                skb->ip_summed = CHECKSUM_UNNECESSARY;
1675                        }
1676                } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
1677                           skb->protocol == htons(ETH_P_IP) &&
1678                           skb->len > 64 && !lp->eth_hasnobuf &&
1679                           (lp->axienet_config->mactype == XAXIENET_1G)) {
1680                        skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1681                        skb->ip_summed = CHECKSUM_COMPLETE;
1682                }
1683
1684                netif_receive_skb(skb);
1685
1686                size += length;
1687                packets++;
1688
1689                new_skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1690                if (!new_skb) {
1691                        dev_err(lp->dev, "No memory for new_skb\n");
1692                        break;
1693                }
1694
1695                /* Ensure that the skb is completely updated
1696                 * prior to DMA mapping
1697                 */
1698                wmb();
1699
1700                cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
1701                                             lp->max_frm_size,
1702                                             DMA_FROM_DEVICE);
1703                cur_p->cntrl = lp->max_frm_size;
1704                cur_p->status = 0;
1705                cur_p->sw_id_offset = (phys_addr_t)new_skb;
1706
1707                ++q->rx_bd_ci;
1708                q->rx_bd_ci %= RX_BD_NUM;
1709
1710                /* Get relevant BD status value */
1711                rmb();
1712#ifdef CONFIG_AXIENET_HAS_MCDMA
1713                cur_p = &q->rxq_bd_v[q->rx_bd_ci];
1714#else
1715                cur_p = &q->rx_bd_v[q->rx_bd_ci];
1716#endif
1717                numbdfree++;
1718        }
1719
1720        ndev->stats.rx_packets += packets;
1721        ndev->stats.rx_bytes += size;
1722        q->rx_packets += packets;
1723        q->rx_bytes += size;
1724
1725        if (tail_p) {
1726#ifdef CONFIG_AXIENET_HAS_MCDMA
1727                axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
1728                                  q->rx_offset, tail_p);
1729#else
1730                axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1731#endif
1732        }
1733
1734        return numbdfree;
1735}
1736
1737/**
1738 * xaxienet_rx_poll - Poll routine for rx packets (NAPI)
1739 * @napi:       napi structure pointer
1740 * @quota:      Max number of rx packets to be processed.
1741 *
1742 * This is the poll routine for the rx path.
1743 * It processes packets up to the maximum quota value.
1744 *
1745 * Return: number of packets received
1746 */
1747static int xaxienet_rx_poll(struct napi_struct *napi, int quota)
1748{
1749        struct net_device *ndev = napi->dev;
1750        struct axienet_local *lp = netdev_priv(ndev);
1751        int work_done = 0;
1752        unsigned int status, cr;
1753
1754        int map = napi - lp->napi;
1755
1756        struct axienet_dma_q *q = lp->dq[map];
1757
1758#ifdef CONFIG_AXIENET_HAS_MCDMA
1759        spin_lock(&q->rx_lock);
1760        status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
1761                                  q->rx_offset);
1762        while ((status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) &&
1763               (work_done < quota)) {
1764                axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
1765                                  q->rx_offset, status);
1766                if (status & XMCDMA_IRQ_ERR_MASK) {
1767                        dev_err(lp->dev, "Rx error 0x%x\n", status);
1768                        break;
1769                }
1770                work_done += axienet_recv(lp->ndev, quota - work_done, q);
1771                status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
1772                                          q->rx_offset);
1773        }
1774        spin_unlock(&q->rx_lock);
1775#else
1776        spin_lock(&q->rx_lock);
1777
1778        status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
1779        while ((status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) &&
1780               (work_done < quota)) {
1781                axienet_dma_out32(q, XAXIDMA_RX_SR_OFFSET, status);
1782                if (status & XAXIDMA_IRQ_ERROR_MASK) {
1783                        dev_err(lp->dev, "Rx error 0x%x\n", status);
1784                        break;
1785                }
1786                work_done += axienet_recv(lp->ndev, quota - work_done, q);
1787                status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
1788        }
1789        spin_unlock(&q->rx_lock);
1790#endif
1791
1792        if (work_done < quota) {
1793                napi_complete(napi);
1794#ifdef CONFIG_AXIENET_HAS_MCDMA
1795                /* Enable the interrupts again */
1796                cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
1797                                      XMCDMA_RX_OFFSET);
1798                cr |= (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK);
1799                axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
1800                                  XMCDMA_RX_OFFSET, cr);
1801#else
1802                /* Enable the interrupts again */
1803                cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
1804                cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1805                axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
1806#endif
1807        }
1808
1809        return work_done;
1810}
1811
1812/**
1813 * axienet_err_irq - Axi Ethernet error irq.
1814 * @irq:        irq number
1815 * @_ndev:      net_device pointer
1816 *
1817 * Return: IRQ_HANDLED for all cases.
1818 *
1819 * This is the Axi Ethernet error ISR. It handles Rx FIFO overrun and reject events.
1820 */
1821static irqreturn_t axienet_err_irq(int irq, void *_ndev)
1822{
1823        unsigned int status;
1824        struct net_device *ndev = _ndev;
1825        struct axienet_local *lp = netdev_priv(ndev);
1826
1827        status = axienet_ior(lp, XAE_IS_OFFSET);
1828        if (status & XAE_INT_RXFIFOOVR_MASK) {
1829                ndev->stats.rx_fifo_errors++;
1830                axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXFIFOOVR_MASK);
1831        }
1832
1833        if (status & XAE_INT_RXRJECT_MASK) {
1834                ndev->stats.rx_dropped++;
1835                axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1836        }
1837
1838        return IRQ_HANDLED;
1839}
1840
1841static inline int get_mcdma_q(struct axienet_local *lp, u32 chan_id)
1842{
1843        int i;
1844
1845        for_each_dma_queue(lp, i) {
1846                if (chan_id == lp->chan_num[i])
1847                        return lp->qnum[i];
1848        }
1849
1850        return -ENODEV;
1851}
1852
1853static inline int map_dma_q_txirq(int irq, struct axienet_local *lp)
1854{
1855        int i, chan_sermask;
1856        u16 chan_id = 1;
1857        struct axienet_dma_q *q = lp->dq[0];
1858
1859        chan_sermask = axienet_dma_in32(q, XMCDMA_TXINT_SER_OFFSET);
1860
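        /* XMCDMA_TXINT_SER_OFFSET reports pending channels as a bitmask;
         * scan from the LSB and return the first channel id that has its
         * bit set.
         */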
1861        for (i = 1, chan_id = 1; i != 0 && i <= chan_sermask;
1862             i <<= 1, chan_id++) {
1863                if (chan_sermask & i)
1864                        return chan_id;
1865        }
1866
1867        return -ENODEV;
1868}
1869
1870static irqreturn_t __maybe_unused axienet_mcdma_tx_irq(int irq, void *_ndev)
1871{
1872        u32 cr;
1873        unsigned int status;
1874        struct net_device *ndev = _ndev;
1875        struct axienet_local *lp = netdev_priv(ndev);
1876        int i, j = map_dma_q_txirq(irq, lp);
1877        struct axienet_dma_q *q;
1878
1879        if (j < 0)
1880                return IRQ_NONE;
1881
1882        i = get_mcdma_q(lp, j);
1883        q = lp->dq[i];
1884
1885        status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id));
1886        if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) {
1887                axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id), status);
1888                axienet_start_xmit_done(lp->ndev, q);
1889                goto out;
1890        }
1891        if (!(status & XMCDMA_IRQ_ALL_MASK))
1892                return IRQ_NONE;
1893        if (status & XMCDMA_IRQ_ERR_MASK) {
1894                dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
1895                dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
1896                        (q->txq_bd_v[q->tx_bd_ci]).phys);
1897
1898                cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
1899                /* Disable coalesce, delay timer and error interrupts */
1900                cr &= (~XMCDMA_IRQ_ALL_MASK);
1901                /* Finally write to the Tx channel control register */
1902                axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
1903
1904                cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
1905                                      q->rx_offset);
1906                /* Disable coalesce, delay timer and error interrupts */
1907                cr &= (~XMCDMA_IRQ_ALL_MASK);
1908                /* write to the Rx channel control register */
1909                axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
1910                                  q->rx_offset, cr);
1911
1912                tasklet_schedule(&lp->dma_err_tasklet[i]);
1913                axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
1914                                  q->rx_offset, status);
1915        }
1916out:
1917        return IRQ_HANDLED;
1918}
1919
1920static inline int map_dma_q_rxirq(int irq, struct axienet_local *lp)
1921{
1922        int i, chan_sermask;
1923        u16 chan_id = 1;
1924        struct axienet_dma_q *q = lp->dq[0];
1925
1926        chan_sermask = axienet_dma_in32(q, XMCDMA_RXINT_SER_OFFSET +
1927                                        q->rx_offset);
1928
1929        for (i = 1, chan_id = 1; i != 0 && i <= chan_sermask;
1930                i <<= 1, chan_id++) {
1931                if (chan_sermask & i)
1932                        return chan_id;
1933        }
1934
1935        return -ENODEV;
1936}
1937
1938static irqreturn_t __maybe_unused axienet_mcdma_rx_irq(int irq, void *_ndev)
1939{
1940        u32 cr;
1941        unsigned int status;
1942        struct net_device *ndev = _ndev;
1943        struct axienet_local *lp = netdev_priv(ndev);
1944        int i, j = map_dma_q_rxirq(irq, lp);
1945        struct axienet_dma_q *q;
1946
1947        if (j < 0)
1948                return IRQ_NONE;
1949
1950        i = get_mcdma_q(lp, j);
1951        q = lp->dq[i];
1952
1953        status = axienet_dma_in32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
1954                                  q->rx_offset);
1955        if (status & (XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK)) {
1956                cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
1957                                      q->rx_offset);
1958                cr &= ~(XMCDMA_IRQ_IOC_MASK | XMCDMA_IRQ_DELAY_MASK);
1959                axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
1960                                  q->rx_offset, cr);
1961                napi_schedule(&lp->napi[i]);
1962        }
1963
1964        if (!(status & XMCDMA_IRQ_ALL_MASK))
1965                return IRQ_NONE;
1966
1967        if (status & XMCDMA_IRQ_ERR_MASK) {
1968                dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
1969                dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
1970                        (q->rxq_bd_v[q->rx_bd_ci]).phys);
1971
1972                cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
1973                /* Disable coalesce, delay timer and error interrupts */
1974                cr &= (~XMCDMA_IRQ_ALL_MASK);
1975                /* Finally write to the Tx channel control register */
1976                axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
1977
1978                cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
1979                                      q->rx_offset);
1980                /* Disable coalesce, delay timer and error interrupts */
1981                cr &= (~XMCDMA_IRQ_ALL_MASK);
1982                /* write to the Rx channel control register */
1983                axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
1984                                  q->rx_offset, cr);
1985
1986                tasklet_schedule(&lp->dma_err_tasklet[i]);
1987                axienet_dma_out32(q, XMCDMA_CHAN_SR_OFFSET(q->chan_id) +
1988                                  q->rx_offset, status);
1989        }
1990
1991        return IRQ_HANDLED;
1992}
1993
1994/**
1995 * map_dma_q_irq - Map dma q based on interrupt number.
1996 * @irq:        irq number
1997 * @lp:         axienet local structure
1998 *
1999 * Return: DMA queue index on success, -ENODEV on failure.
2000 *
2001 * This returns the index of the DMA queue whose interrupt has occurred.
2002 */
2003static int map_dma_q_irq(int irq, struct axienet_local *lp)
2004{
2005        int i;
2006
2007        for_each_dma_queue(lp, i) {
2008                if (irq == lp->dq[i]->tx_irq || irq == lp->dq[i]->rx_irq)
2009                        return i;
2010        }
2011        pr_err("Error mapping DMA irq\n");
2012        return -ENODEV;
2013}
2014
2015/**
2016 * axienet_tx_irq - Tx Done Isr.
2017 * @irq:        irq number
2018 * @_ndev:      net_device pointer
2019 *
2020 * Return: IRQ_HANDLED or IRQ_NONE.
2021 *
2022 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
2023 * to complete the BD processing.
2024 */
2025static irqreturn_t __maybe_unused axienet_tx_irq(int irq, void *_ndev)
2026{
2027        u32 cr;
2028        unsigned int status;
2029        struct net_device *ndev = _ndev;
2030        struct axienet_local *lp = netdev_priv(ndev);
2031        int i = map_dma_q_irq(irq, lp);
2032        struct axienet_dma_q *q;
2033
2034        if (i < 0)
2035                return IRQ_NONE;
2036
2037        q = lp->dq[i];
2038
2039        status = axienet_dma_in32(q, XAXIDMA_TX_SR_OFFSET);
2040        if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
2041                axienet_dma_out32(q, XAXIDMA_TX_SR_OFFSET, status);
2042                axienet_start_xmit_done(lp->ndev, q);
2043                goto out;
2044        }
2045
2046        if (!(status & XAXIDMA_IRQ_ALL_MASK))
2047                dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
2048
2049        if (status & XAXIDMA_IRQ_ERROR_MASK) {
2050                dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
2051                dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
2052                        (q->tx_bd_v[q->tx_bd_ci]).phys);
2053
2054                cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
2055                /* Disable coalesce, delay timer and error interrupts */
2056                cr &= (~XAXIDMA_IRQ_ALL_MASK);
2057                /* Write to the Tx channel control register */
2058                axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
2059
2060                cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
2061                /* Disable coalesce, delay timer and error interrupts */
2062                cr &= (~XAXIDMA_IRQ_ALL_MASK);
2063                /* Write to the Rx channel control register */
2064                axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
2065
2066                tasklet_schedule(&lp->dma_err_tasklet[i]);
2067                axienet_dma_out32(q, XAXIDMA_TX_SR_OFFSET, status);
2068        }
2069out:
2070        return IRQ_HANDLED;
2071}
2072
2073/**
2074 * axienet_rx_irq - Rx Isr.
2075 * @irq:        irq number
2076 * @_ndev:      net_device pointer
2077 *
2078 * Return: IRQ_HANDLED or IRQ_NONE.
2079 *
2080 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
2081 * processing.
2082 */
2083static irqreturn_t __maybe_unused axienet_rx_irq(int irq, void *_ndev)
2084{
2085        u32 cr;
2086        unsigned int status;
2087        struct net_device *ndev = _ndev;
2088        struct axienet_local *lp = netdev_priv(ndev);
2089        int i = map_dma_q_irq(irq, lp);
2090        struct axienet_dma_q *q;
2091
2092        if (i < 0)
2093                return IRQ_NONE;
2094
2095        q = lp->dq[i];
2096
2097        status = axienet_dma_in32(q, XAXIDMA_RX_SR_OFFSET);
2098        if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
2099                cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
2100                cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
2101                axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
2102                napi_schedule(&lp->napi[i]);
2103        }
2104
2105        if (!(status & XAXIDMA_IRQ_ALL_MASK))
2106                dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
2107
2108        if (status & XAXIDMA_IRQ_ERROR_MASK) {
2109                dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
2110                dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
2111                        (q->rx_bd_v[q->rx_bd_ci]).phys);
2112
2113                cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
2114                /* Disable coalesce, delay timer and error interrupts */
2115                cr &= (~XAXIDMA_IRQ_ALL_MASK);
2116                /* Finally write to the Tx channel control register */
2117                axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
2118
2119                cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
2120                /* Disable coalesce, delay timer and error interrupts */
2121                cr &= (~XAXIDMA_IRQ_ALL_MASK);
2122                /* Write to the Rx channel control register */
2123                axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
2124
2125                tasklet_schedule(&lp->dma_err_tasklet[i]);
2126                axienet_dma_out32(q, XAXIDMA_RX_SR_OFFSET, status);
2127        }
2128
2129        return IRQ_HANDLED;
2130}
2131
2132static void axienet_dma_err_handler(unsigned long data);
2133static void axienet_mcdma_err_handler(unsigned long data);
2134
2135static int axienet_mii_init(struct net_device *ndev)
2136{
2137        struct axienet_local *lp = netdev_priv(ndev);
2138        int ret, mdio_mcreg;
2139
2140        mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
2141        ret = axienet_mdio_wait_until_ready(lp);
2142        if (ret < 0)
2143                return ret;
2144
2145        /* Disable the MDIO interface till Axi Ethernet reset is completed.
2146         * When we do an Axi Ethernet reset, it resets the complete core,
2147         * including the MDIO. If MDIO is not disabled when the reset process
2148         * is started, MDIO will be broken afterwards.
2149         */
2150        axienet_iow(lp, XAE_MDIO_MC_OFFSET,
2151                    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
2152        axienet_device_reset(ndev);
2153        /* Enable the MDIO */
2154        axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
2155        ret = axienet_mdio_wait_until_ready(lp);
2156        if (ret < 0)
2157                return ret;
2158
2159        return 0;
2160}
2161
2162/**
2163 * axienet_open - Driver open routine.
2164 * @ndev:       Pointer to net_device structure
2165 *
2166 * Return: 0 on success.
2167 *          -ENODEV if the PHY cannot be connected.
2168 *          Other non-zero error values on failure.
2169 *
2170 * This is the driver open routine. It calls phy_start to start the PHY device.
2171 * It also allocates interrupt service routines, enables the interrupt lines
2172 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
2173 * descriptors are initialized.
2174 */
2175static int axienet_open(struct net_device *ndev)
2176{
2177        int ret = 0, i;
2178        struct axienet_local *lp = netdev_priv(ndev);
2179        struct phy_device *phydev = NULL;
2180        struct axienet_dma_q *q;
2181
2182        dev_dbg(&ndev->dev, "axienet_open()\n");
2183
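        /* The 10G/25G MAC does not use the MDIO-managed reset path, so
         * reset the device directly; other MACs go through
         * axienet_mii_init(), which quiesces MDIO around the reset.
         */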
2184        if (lp->axienet_config->mactype == XAXIENET_10G_25G)
2185                axienet_device_reset(ndev);
2186        else
2187                ret = axienet_mii_init(ndev);
2188        if (ret < 0)
2189                return ret;
2190
2191        if (lp->phy_node) {
2192                if (lp->phy_type == XAE_PHY_TYPE_GMII) {
2193                        phydev = of_phy_connect(lp->ndev, lp->phy_node,
2194                                                axienet_adjust_link, 0,
2195                                                PHY_INTERFACE_MODE_GMII);
2196                } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
2197                        phydev = of_phy_connect(lp->ndev, lp->phy_node,
2198                                                axienet_adjust_link, 0,
2199                                                PHY_INTERFACE_MODE_RGMII_ID);
2200                } else if ((lp->axienet_config->mactype == XAXIENET_1G) ||
2201                             (lp->axienet_config->mactype == XAXIENET_2_5G)) {
2202                        phydev = of_phy_connect(lp->ndev, lp->phy_node,
2203                                                axienet_adjust_link,
2204                                                lp->phy_flags,
2205                                                lp->phy_interface);
2206                }
2207
2208                if (!phydev)
2209                        dev_err(lp->dev, "of_phy_connect() failed\n");
2210                else
2211                        phy_start(phydev);
2212        }
2213
2214        if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
2215                /* Enable tasklets for Axi DMA error handling */
2216                for_each_dma_queue(lp, i) {
2217#ifdef CONFIG_AXIENET_HAS_MCDMA
2218                        tasklet_init(&lp->dma_err_tasklet[i],
2219                                     axienet_mcdma_err_handler,
2220                                     (unsigned long)lp->dq[i]);
2221#else
2222                        tasklet_init(&lp->dma_err_tasklet[i],
2223                                     axienet_dma_err_handler,
2224                                     (unsigned long)lp->dq[i]);
2225#endif
2226
2227                        /* Enable NAPI scheduling before enabling the Axi
2228                         * DMA Rx IRQ, or we might hit a race: the Rx ISR
2229                         * disables IRQ processing before scheduling NAPI
2230                         * to complete it. If NAPI scheduling is (still)
2231                         * disabled at that time, no more Rx IRQs will be
2232                         * processed, since only NAPI re-enables them. */
2233                        napi_enable(&lp->napi[i]);
2234                }
2235                for_each_dma_queue(lp, i) {
2236                        struct axienet_dma_q *q = lp->dq[i];
2237#ifdef CONFIG_AXIENET_HAS_MCDMA
2238                        /* Enable interrupts for Axi MCDMA Tx */
2239                        ret = request_irq(q->tx_irq, axienet_mcdma_tx_irq,
2240                                          IRQF_SHARED, ndev->name, ndev);
2241                        if (ret)
2242                                goto err_tx_irq;
2243
2244                        /* Enable interrupts for Axi MCDMA Rx */
2245                        ret = request_irq(q->rx_irq, axienet_mcdma_rx_irq,
2246                                          IRQF_SHARED, ndev->name, ndev);
2247                        if (ret)
2248                                goto err_rx_irq;
2249#else
2250                        /* Enable interrupts for Axi DMA Tx */
2251                        ret = request_irq(q->tx_irq, axienet_tx_irq,
2252                                          0, ndev->name, ndev);
2253                        if (ret)
2254                                goto err_tx_irq;
2255                        /* Enable interrupts for Axi DMA Rx */
2256                        ret = request_irq(q->rx_irq, axienet_rx_irq,
2257                                          0, ndev->name, ndev);
2258                        if (ret)
2259                                goto err_rx_irq;
2260#endif
2261                }
2262        }
2263#ifdef CONFIG_XILINX_TSN_PTP
2264        if (lp->is_tsn) {
2265                INIT_WORK(&lp->tx_tstamp_work, axienet_tx_tstamp);
2266                skb_queue_head_init(&lp->ptp_txq);
2267
2268                lp->ptp_rx_hw_pointer = 0;
2269                lp->ptp_rx_sw_pointer = 0xff;
2270
2271                axienet_iow(lp, PTP_RX_CONTROL_OFFSET, PTP_RX_PACKET_CLEAR);
2272
2273                ret = request_irq(lp->ptp_rx_irq, axienet_ptp_rx_irq,
2274                                  0, "ptp_rx", ndev);
2275                if (ret)
2276                        goto err_ptp_rx_irq;
2277
2278                ret = request_irq(lp->ptp_tx_irq, axienet_ptp_tx_irq,
2279                                  0, "ptp_tx", ndev);
2280                if (ret)
2281                        goto err_ptp_rx_irq;
2282        }
2283#endif
2284
2285        if (!lp->eth_hasnobuf && (lp->axienet_config->mactype == XAXIENET_1G)) {
2286                /* Enable interrupts for Axi Ethernet */
2287                ret = request_irq(lp->eth_irq, axienet_err_irq, 0, ndev->name,
2288                                  ndev);
2289                if (ret)
2290                        goto err_eth_irq;
2291        }
2292
2293        netif_tx_start_all_queues(ndev);
2294        return 0;
2295
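/* Error unwind: release IRQs and disable NAPI in the reverse order of their
 * setup above, then disconnect the PHY and kill the error tasklets.
 */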
2296err_eth_irq:
2297        while (i--) {
2298                q = lp->dq[i];
2299                free_irq(q->rx_irq, ndev);
2300        }
2301        i = lp->num_queues;
2302err_rx_irq:
2303        while (i--) {
2304                q = lp->dq[i];
2305                free_irq(q->tx_irq, ndev);
2306        }
2307err_tx_irq:
2308        for_each_dma_queue(lp, i)
2309                napi_disable(&lp->napi[i]);
2310#ifdef CONFIG_XILINX_TSN_PTP
2311err_ptp_rx_irq:
2312#endif
2313        if (phydev)
2314                phy_disconnect(phydev);
2315        phydev = NULL;
2316        for_each_dma_queue(lp, i)
2317                tasklet_kill(&lp->dma_err_tasklet[i]);
2318        dev_err(lp->dev, "request_irq() failed\n");
2319        return ret;
2320}
2321
2322/**
2323 * axienet_stop - Driver stop routine.
2324 * @ndev:       Pointer to net_device structure
2325 *
2326 * Return: 0, on success.
2327 *
2328 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
2329 * device. It also removes the interrupt handlers and disables the interrupts.
2330 * The Axi DMA Tx/Rx BDs are released.
2331 */
2332static int axienet_stop(struct net_device *ndev)
2333{
2334        u32 cr;
2335        u32 i;
2336        struct axienet_local *lp = netdev_priv(ndev);
2337        struct axienet_dma_q *q;
2338
2339        dev_dbg(&ndev->dev, "axienet_stop()\n");
2340
2341        if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
2342                for_each_dma_queue(lp, i) {
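                        /* Clear the RUNSTOP bit to halt both DMA channels
                         * and disable MAC Tx/Rx before releasing this
                         * queue's IRQs and NAPI context.
                         */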
2343                        q = lp->dq[i];
2344                        cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
2345                        axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
2346                                          cr & (~XAXIDMA_CR_RUNSTOP_MASK));
2347                        cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
2348                        axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
2349                                          cr & (~XAXIDMA_CR_RUNSTOP_MASK));
2350                        lp->axienet_config->setoptions(ndev, lp->options &
2351                                   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2352
2353                        netif_stop_queue(ndev);
2354                        napi_disable(&lp->napi[i]);
2355                        tasklet_kill(&lp->dma_err_tasklet[i]);
2356
2357                        free_irq(q->tx_irq, ndev);
2358                        free_irq(q->rx_irq, ndev);
2359                }
2360        }
2361
2362#ifdef CONFIG_XILINX_TSN_PTP
2363        if (lp->is_tsn) {
2364                free_irq(lp->ptp_tx_irq, ndev);
2365                free_irq(lp->ptp_rx_irq, ndev);
2366        }
2367#endif
2368
2369        if ((lp->axienet_config->mactype == XAXIENET_1G) && !lp->eth_hasnobuf)
2370                free_irq(lp->eth_irq, ndev);
2371
2372        if (ndev->phydev)
2373                phy_disconnect(ndev->phydev);
2374
2375        if (lp->temac_no != XAE_TEMAC2)
2376                axienet_dma_bd_release(ndev);
2377        return 0;
2378}
2379
2380/**
2381 * axienet_change_mtu - Driver change mtu routine.
2382 * @ndev:       Pointer to net_device structure
2383 * @new_mtu:    New mtu value to be applied
2384 *
2385 * Return: 0 on success, -EBUSY if the device is running, or -EINVAL if
2386 * the new MTU is out of the supported range.
2387 *
2388 * This is the change mtu driver routine. It checks whether the hardware
2389 * supports the requested MTU; it can be called only when the device is down.
2390 */
2391static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
2392{
2393        struct axienet_local *lp = netdev_priv(ndev);
2394
2395        if (netif_running(ndev))
2396                return -EBUSY;
2397
2398        if ((new_mtu + VLAN_ETH_HLEN +
2399                XAE_TRL_SIZE) > lp->rxmem)
2400                return -EINVAL;
2401
2402        if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
2403                return -EINVAL;
2404
2405        ndev->mtu = new_mtu;
2406
2407        return 0;
2408}
2409
2410#ifdef CONFIG_NET_POLL_CONTROLLER
2411/**
2412 * axienet_poll_controller - Axi Ethernet poll mechanism.
2413 * @ndev:       Pointer to net_device structure
2414 *
2415 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
2416 * to polling the ISRs and are enabled back after the polling is done.
2417 */
2418static void axienet_poll_controller(struct net_device *ndev)
2419{
2420        struct axienet_local *lp = netdev_priv(ndev);
2421
2422        disable_irq(lp->tx_irq);
2423        disable_irq(lp->rx_irq);
2424        axienet_rx_irq(lp->rx_irq, ndev);
2425        axienet_tx_irq(lp->tx_irq, ndev);
2426        enable_irq(lp->tx_irq);
2427        enable_irq(lp->rx_irq);
2428}
2429#endif
2430
2431#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
2432/**
2433 *  axienet_set_timestamp_mode - sets up the hardware for the requested mode
2434 *  @lp: Pointer to axienet local structure
2435 *  @config: the hwtstamp configuration requested
2436 *
2437 * Return: 0 on success, Negative value on errors
2438 */
2439static int axienet_set_timestamp_mode(struct axienet_local *lp,
2440                                      struct hwtstamp_config *config)
2441{
2442        u32 regval;
2443
2444#ifdef CONFIG_XILINX_TSN_PTP
2445        if (lp->is_tsn) {
2446                /* reserved for future extensions */
2447                if (config->flags)
2448                        return -EINVAL;
2449
2450                if ((config->tx_type != HWTSTAMP_TX_OFF) &&
2451                    (config->tx_type != HWTSTAMP_TX_ON))
2452                        return -ERANGE;
2453
2454                config->tx_type = HWTSTAMP_TX_ON;
2455
2456                /* On RX always timestamp everything */
2457                switch (config->rx_filter) {
2458                case HWTSTAMP_FILTER_NONE:
2459                        break;
2460                default:
2461                        config->rx_filter = HWTSTAMP_FILTER_ALL;
2462                }
2463                return 0;
2464        }
2465#endif
2466        /* reserved for future extensions */
2467        if (config->flags)
2468                return -EINVAL;
2469
2470        /* Read the current value in the MAC TX CTRL register */
2471        regval = axienet_ior(lp, XAE_TC_OFFSET);
2472
2473        switch (config->tx_type) {
2474        case HWTSTAMP_TX_OFF:
2475                regval &= ~XAE_TC_INBAND1588_MASK;
2476                break;
2477        case HWTSTAMP_TX_ON:
2478                config->tx_type = HWTSTAMP_TX_ON;
2479                regval |= XAE_TC_INBAND1588_MASK;
2480                break;
2481        case HWTSTAMP_TX_ONESTEP_SYNC:
2482                config->tx_type = HWTSTAMP_TX_ONESTEP_SYNC;
2483                regval |= XAE_TC_INBAND1588_MASK;
2484                break;
2485        default:
2486                return -ERANGE;
2487        }
2488
2489        if (lp->axienet_config->mactype != XAXIENET_10G_25G)
2490                axienet_iow(lp, XAE_TC_OFFSET, regval);
2491
2492        /* Read the current value in the MAC RX RCW1 register */
2493        regval = axienet_ior(lp, XAE_RCW1_OFFSET);
2494
2495        /* On RX always timestamp everything */
2496        switch (config->rx_filter) {
2497        case HWTSTAMP_FILTER_NONE:
2498                regval &= ~XAE_RCW1_INBAND1588_MASK;
2499                break;
2500        default:
2501                config->rx_filter = HWTSTAMP_FILTER_ALL;
2502                regval |= XAE_RCW1_INBAND1588_MASK;
2503        }
2504
2505        if (lp->axienet_config->mactype != XAXIENET_10G_25G)
2506                axienet_iow(lp, XAE_RCW1_OFFSET, regval);
2507
2508        return 0;
2509}
2510
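/* A minimal userspace sketch (an illustration, not part of this driver) of
 * how the SIOCSHWTSTAMP entry point below is reached; the interface name
 * "eth0", the chosen config values and fd (any open AF_INET socket) are
 * assumptions:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */
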
2511/**
2512 * axienet_set_ts_config - user entry point for timestamp mode
2513 * @lp: Pointer to axienet local structure
2514 * @ifr: ioctl data
2515 *
2516 * Set hardware to the requested mode. If unsupported, return an error
2517 * with no changes. Otherwise, store the mode for future reference.
2518 *
2519 * Return: 0 on success, Negative value on errors
2520 */
2521static int axienet_set_ts_config(struct axienet_local *lp, struct ifreq *ifr)
2522{
2523        struct hwtstamp_config config;
2524        int err;
2525
2526        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2527                return -EFAULT;
2528
2529        err = axienet_set_timestamp_mode(lp, &config);
2530        if (err)
2531                return err;
2532
2533        /* save these settings for future reference */
2534        memcpy(&lp->tstamp_config, &config, sizeof(lp->tstamp_config));
2535
2536        return copy_to_user(ifr->ifr_data, &config,
2537                            sizeof(config)) ? -EFAULT : 0;
2538}
2539
2540/**
2541 * axienet_get_ts_config - return the current timestamp configuration
2542 * to the user
2543 * @lp: pointer to axienet local structure
2544 * @ifr: ioctl data
2545 *
2546 * Return: 0 on success, Negative value on errors
2547 */
2548static int axienet_get_ts_config(struct axienet_local *lp, struct ifreq *ifr)
2549{
2550        struct hwtstamp_config *config = &lp->tstamp_config;
2551
2552        return copy_to_user(ifr->ifr_data, config,
2553                            sizeof(*config)) ? -EFAULT : 0;
2554}
2555#endif
2556
2557/* Ioctl MII Interface */
2558static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2559{
2560#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
2561        struct axienet_local *lp = netdev_priv(dev);
2562#endif
2563
2564        if (!netif_running(dev))
2565                return -EINVAL;
2566
2567        switch (cmd) {
2568        case SIOCGMIIPHY:
2569        case SIOCGMIIREG:
2570        case SIOCSMIIREG:
2571                return phy_mii_ioctl(dev->phydev, rq, cmd);
2572#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
2573        case SIOCSHWTSTAMP:
2574                return axienet_set_ts_config(lp, rq);
2575        case SIOCGHWTSTAMP:
2576                return axienet_get_ts_config(lp, rq);
2577#endif
2578#ifdef CONFIG_XILINX_TSN_QBV
2579        case SIOCCHIOCTL:
2580                return axienet_set_schedule(dev, rq->ifr_data);
2581#endif
2582        default:
2583                return -EOPNOTSUPP;
2584        }
2585}
2586
2587static const struct net_device_ops axienet_netdev_ops = {
2588        .ndo_open = axienet_open,
2589        .ndo_stop = axienet_stop,
2590        .ndo_start_xmit = axienet_start_xmit,
2591        .ndo_change_mtu = axienet_change_mtu,
2592        .ndo_set_mac_address = netdev_set_mac_address,
2593        .ndo_validate_addr = eth_validate_addr,
2594        .ndo_set_rx_mode = axienet_set_multicast_list,
2595        .ndo_do_ioctl = axienet_ioctl,
2596#ifdef CONFIG_NET_POLL_CONTROLLER
2597        .ndo_poll_controller = axienet_poll_controller,
2598#endif
2599};
2600
2601/**
2602 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
2603 * @ndev:       Pointer to net_device structure
2604 * @ecmd:       Pointer to ethtool_cmd structure
2605 *
2606 * This implements ethtool command for getting PHY settings. If PHY could
2607 * not be found, the function returns -ENODEV. This function calls the
2608 * relevant PHY ethtool API to get the PHY settings.
2609 * Issue "ethtool ethX" under linux prompt to execute this function.
2610 *
2611 * Return: 0 on success, -ENODEV if PHY doesn't exist
2612 */
2613static int axienet_ethtools_get_settings(struct net_device *ndev,
2614                                         struct ethtool_cmd *ecmd)
2615{
2616        struct phy_device *phydev = ndev->phydev;
2617
2618        if (!phydev)
2619                return -ENODEV;
2620        return phy_ethtool_gset(phydev, ecmd);
2621}
2622
2623/**
2624 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
2625 * @ndev:       Pointer to net_device structure
2626 * @ecmd:       Pointer to ethtool_cmd structure
2627 *
2628 * This implements ethtool command for setting various PHY settings. If PHY
2629 * could not be found, the function returns -ENODEV. This function calls the
2630 * relevant PHY ethtool API to set the PHY.
2631 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
2632 * function.
2633 *
2634 * Return: 0 on success, -ENODEV if PHY doesn't exist
2635 */
2636static int axienet_ethtools_set_settings(struct net_device *ndev,
2637                                         struct ethtool_cmd *ecmd)
2638{
2639        struct phy_device *phydev = ndev->phydev;
2640
2641        if (!phydev)
2642                return -ENODEV;
2643        return phy_ethtool_sset(phydev, ecmd);
2644}
2645
2646/**
2647 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
2648 * @ndev:       Pointer to net_device structure
2649 * @ed:         Pointer to ethtool_drvinfo structure
2650 *
2651 * This implements ethtool command for getting the driver information.
2652 * Issue "ethtool -i ethX" under linux prompt to execute this function.
2653 */
2654static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
2655                                         struct ethtool_drvinfo *ed)
2656{
2657        strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
2658        strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
2659}
2660
2661/**
2662 * axienet_ethtools_get_regs_len - Get the total regs length present in the
2663 *                                 AxiEthernet core.
2664 * @ndev:       Pointer to net_device structure
2665 *
2666 * This implements ethtool command for getting the total register length
2667 * information.
2668 *
2669 * Return: the total regs length
2670 */
2671static int axienet_ethtools_get_regs_len(struct net_device *ndev)
2672{
2673        return sizeof(u32) * AXIENET_REGS_N;
2674}
2675
2676/**
2677 * axienet_ethtools_get_regs - Dump the contents of all registers present
2678 *                             in AxiEthernet core.
2679 * @ndev:       Pointer to net_device structure
2680 * @regs:       Pointer to ethtool_regs structure
2681 * @ret:        Void pointer used to return the contents of the registers.
2682 *
2683 * This implements ethtool command for getting the Axi Ethernet register dump.
2684 * Issue "ethtool -d ethX" to execute this function.
2685 */
2686static void axienet_ethtools_get_regs(struct net_device *ndev,
2687                                      struct ethtool_regs *regs, void *ret)
2688{
2689        u32 *data = (u32 *)ret;
2690        size_t len = sizeof(u32) * AXIENET_REGS_N;
2691        struct axienet_local *lp = netdev_priv(ndev);
2692
2693        regs->version = 0;
2694        regs->len = len;
2695
2696        memset(data, 0, len);
2697        data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
2698        data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
2699        data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
2700        data[3] = axienet_ior(lp, XAE_IS_OFFSET);
2701        data[4] = axienet_ior(lp, XAE_IP_OFFSET);
2702        data[5] = axienet_ior(lp, XAE_IE_OFFSET);
2703        data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
2704        data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
2705        data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
2706        data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
2707        data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
2708        data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
2709        data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
2710        data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
2711        data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
2712        data[15] = axienet_ior(lp, XAE_TC_OFFSET);
2713        data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
2714        data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
2715        data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
2716        data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
2717        data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
2718        data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
2719        data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
2720        data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
2721        data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
2722        data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
2723        data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
2724        data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
2725        data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
2726        data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
2727        data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
2728        data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
2729}
2730
2731/**
2732 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
2733 *                                   Tx and Rx paths.
2734 * @ndev:       Pointer to net_device structure
2735 * @epauseparm: Pointer to ethtool_pauseparam structure.
2736 *
2737 * This implements ethtool command for getting axi ethernet pause frame
2738 * setting. Issue "ethtool -a ethX" to execute this function.
2739 */
2740static void
2741axienet_ethtools_get_pauseparam(struct net_device *ndev,
2742                                struct ethtool_pauseparam *epauseparm)
2743{
2744        u32 regval;
2745        struct axienet_local *lp = netdev_priv(ndev);
2746
2747        epauseparm->autoneg  = 0;
2748        regval = axienet_ior(lp, XAE_FCC_OFFSET);
2749        epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
2750        epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
2751}
2752
2753/**
2754 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
2755 *                                   settings.
2756 * @ndev:       Pointer to net_device structure
2757 * @epauseparm: Pointer to ethtool_pauseparam structure
2758 *
2759 * This implements the ethtool command for enabling flow control on the Rx
2760 * and Tx paths. Issue "ethtool -A ethX tx on|off" at the Linux prompt to
2761 * execute this function.
2762 *
2763 * Return: 0 on success, -EFAULT if the device is running
2764 */
2765static int
2766axienet_ethtools_set_pauseparam(struct net_device *ndev,
2767                                struct ethtool_pauseparam *epauseparm)
2768{
2769        u32 regval = 0;
2770        struct axienet_local *lp = netdev_priv(ndev);
2771
2772        if (netif_running(ndev)) {
2773                netdev_err(ndev,
2774                           "Please stop netif before applying configuration\n");
2775                return -EFAULT;
2776        }
2777
2778        regval = axienet_ior(lp, XAE_FCC_OFFSET);
2779        if (epauseparm->tx_pause)
2780                regval |= XAE_FCC_FCTX_MASK;
2781        else
2782                regval &= ~XAE_FCC_FCTX_MASK;
2783        if (epauseparm->rx_pause)
2784                regval |= XAE_FCC_FCRX_MASK;
2785        else
2786                regval &= ~XAE_FCC_FCRX_MASK;
2787        axienet_iow(lp, XAE_FCC_OFFSET, regval);
2788
2789        return 0;
2790}
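    /*
     * A hedged usage sketch: with the interface down, the Tx/Rx pause
     * settings can be toggled and read back from the shell, e.g.
     *
     *   # ip link set eth0 down
     *   # ethtool -A eth0 tx on rx off
     *   # ethtool -a eth0
     *
     * Both helpers above operate on the single flow-control register at
     * XAE_FCC_OFFSET, using XAE_FCC_FCTX_MASK and XAE_FCC_FCRX_MASK for
     * the Tx and Rx enable bits respectively.
     */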
2791
2792/**
2793 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2794 * @ndev:       Pointer to net_device structure
2795 * @ecoalesce:  Pointer to ethtool_coalesce structure
2796 *
2797 * This implements the ethtool command for getting the DMA interrupt
2798 * coalescing count on the Tx and Rx paths. Issue "ethtool -c ethX" at the
2799 * Linux prompt to execute this function.
2800 *
2801 * Return: 0 always
2802 */
2803static int axienet_ethtools_get_coalesce(struct net_device *ndev,
2804                                         struct ethtool_coalesce *ecoalesce)
2805{
2806        u32 regval = 0;
2807        struct axienet_local *lp = netdev_priv(ndev);
2808        struct axienet_dma_q *q;
2809        int i;
2810
2811        for_each_dma_queue(lp, i) {
2812                q = lp->dq[i];
2813
2814                regval = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
2815                ecoalesce->rx_max_coalesced_frames +=
2816                                                (regval & XAXIDMA_COALESCE_MASK)
2817                                                     >> XAXIDMA_COALESCE_SHIFT;
2818                regval = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
2819                ecoalesce->tx_max_coalesced_frames +=
2820                                                (regval & XAXIDMA_COALESCE_MASK)
2821                                                     >> XAXIDMA_COALESCE_SHIFT;
2822        }
2823        return 0;
2824}
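    /*
     * Worked example, assuming the usual AXI DMA control-register layout
     * in which XAXIDMA_COALESCE_MASK (0x00FF0000) and
     * XAXIDMA_COALESCE_SHIFT (16) select the IRQThreshold field: a CR
     * value of 0x00180000 decodes to (0x00180000 & 0x00FF0000) >> 16 =
     * 0x18 = 24 frames per interrupt. With several queues, the per-queue
     * counts are summed into the single value reported to ethtool.
     */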
2825
2826/**
2827 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2828 * @ndev:       Pointer to net_device structure
2829 * @ecoalesce:  Pointer to ethtool_coalesce structure
2830 *
2831 * This implements the ethtool command for setting the DMA interrupt
2832 * coalescing count on the Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5"
2833 * at the Linux prompt to execute this function.
2834 *
2835 * Return: 0 on success, non-zero error value on failure.
2836 */
2837static int axienet_ethtools_set_coalesce(struct net_device *ndev,
2838                                         struct ethtool_coalesce *ecoalesce)
2839{
2840        struct axienet_local *lp = netdev_priv(ndev);
2841
2842        if (netif_running(ndev)) {
2843                netdev_err(ndev,
2844                           "Please stop netif before applying configuration\n");
2845                return -EFAULT;
2846        }
2847
2848        if ((ecoalesce->rx_coalesce_usecs) ||
2849            (ecoalesce->rx_coalesce_usecs_irq) ||
2850            (ecoalesce->rx_max_coalesced_frames_irq) ||
2851            (ecoalesce->tx_coalesce_usecs) ||
2852            (ecoalesce->tx_coalesce_usecs_irq) ||
2853            (ecoalesce->tx_max_coalesced_frames_irq) ||
2854            (ecoalesce->stats_block_coalesce_usecs) ||
2855            (ecoalesce->use_adaptive_rx_coalesce) ||
2856            (ecoalesce->use_adaptive_tx_coalesce) ||
2857            (ecoalesce->pkt_rate_low) ||
2858            (ecoalesce->rx_coalesce_usecs_low) ||
2859            (ecoalesce->rx_max_coalesced_frames_low) ||
2860            (ecoalesce->tx_coalesce_usecs_low) ||
2861            (ecoalesce->tx_max_coalesced_frames_low) ||
2862            (ecoalesce->pkt_rate_high) ||
2863            (ecoalesce->rx_coalesce_usecs_high) ||
2864            (ecoalesce->rx_max_coalesced_frames_high) ||
2865            (ecoalesce->tx_coalesce_usecs_high) ||
2866            (ecoalesce->tx_max_coalesced_frames_high) ||
2867            (ecoalesce->rate_sample_interval))
2868                return -EOPNOTSUPP;
2869        if (ecoalesce->rx_max_coalesced_frames)
2870                lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2871        if (ecoalesce->tx_max_coalesced_frames)
2872                lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2873
2874        return 0;
2875}
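    /*
     * Illustrative usage: only the frame-count knobs are honoured here;
     * every other coalescing parameter is rejected with -EOPNOTSUPP.
     * With the interface down:
     *
     *   # ethtool -C eth0 rx-frames 25 tx-frames 64
     *
     * The new counts are latched in lp->coalesce_count_rx/tx and are
     * presumably programmed into the DMA control registers when the
     * interface is next brought up.
     */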
2876
2877#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
2878/**
2879 * axienet_ethtools_get_ts_info - Get h/w timestamping capabilities.
2880 * @ndev:       Pointer to net_device structure
2881 * @info:       Pointer to ethtool_ts_info structure
2882 *
2883 * Return: 0 on success, non-zero error value on failure.
2884 */
2885static int axienet_ethtools_get_ts_info(struct net_device *ndev,
2886                                        struct ethtool_ts_info *info)
2887{
2888        info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
2889                                SOF_TIMESTAMPING_RX_HARDWARE |
2890                                SOF_TIMESTAMPING_RAW_HARDWARE;
2891        info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
2892        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2893                           (1 << HWTSTAMP_FILTER_ALL);
2894        info->phc_index = 0;
2895
2896#ifdef CONFIG_XILINX_TSN_PTP
2897        info->phc_index = axienet_phc_index;
2898#endif
2899        return 0;
2900}
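    /*
     * Illustrative only: userspace would enable the capabilities
     * advertised above through the standard SIOCSHWTSTAMP ioctl, e.g.
     * with a struct hwtstamp_config of { .tx_type = HWTSTAMP_TX_ON,
     * .rx_filter = HWTSTAMP_FILTER_ALL }, or with linuxptp's
     * "hwstamp_ctl -i eth0 -t 1 -r 1".
     */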
2901#endif
2902
2903#ifdef CONFIG_AXIENET_HAS_MCDMA
2904static void axienet_strings(struct net_device *ndev,
2905                            u32 sset, u8 *data)
2906{
2907        struct axienet_local *lp = netdev_priv(ndev);
2908        struct axienet_dma_q *q;
2909        int i, j, k = 0;
2910
2911        for (i = 0, j = 0; i < AXIENET_SSTATS_LEN(lp);) {
2912                if (j >= lp->num_queues)
2913                        break;
2914                q = lp->dq[j];
2915                if (i % 4 == 0)
2916                        k = (q->chan_id - 1) * 4;
2917                if (sset == ETH_SS_STATS)
2918                        memcpy(data + i * ETH_GSTRING_LEN,
2919                               axienet_get_strings_stats[k].name,
2920                               ETH_GSTRING_LEN);
2921                ++i;
2922                k++;
2923                if (i % 4 == 0)
2924                        ++j;
2925        }
2926}
2927
2928static int axienet_sset_count(struct net_device *ndev,
2929                              int sset)
2930{
2931        struct axienet_local *lp = netdev_priv(ndev);
2932
2933        switch (sset) {
2934        case ETH_SS_STATS:
2935                return AXIENET_SSTATS_LEN(lp);
2936        default:
2937                return -EOPNOTSUPP;
2938        }
2939}
2940
2941static void axienet_get_stats(struct net_device *ndev,
2942                              struct ethtool_stats *stats,
2943                              u64 *data)
2944{
2945        struct axienet_local *lp = netdev_priv(ndev);
2946        struct axienet_dma_q *q;
2947        unsigned int i = 0, j;
2948
2949        for (i = 0, j = 0; i < AXIENET_SSTATS_LEN(lp);) {
2950                if (j >= lp->num_queues)
2951                        break;
2952
2953                q = lp->dq[j];
2954                data[i++] = q->tx_packets;
2955                data[i++] = q->tx_bytes;
2956                data[i++] = q->rx_packets;
2957                data[i++] = q->rx_bytes;
2958                ++j;
2959        }
2960}
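    /*
     * Example output shape (illustrative): with two queues,
     * "ethtool -S eth0" reports txq0_packets, txq0_bytes, rxq0_packets,
     * rxq0_bytes, then the same four counters for queue 1;
     * axienet_strings() above walks the string table in strides of four
     * per channel to build exactly this layout.
     */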
2961#endif
2962
2963static const struct ethtool_ops axienet_ethtool_ops = {
2964        .get_settings   = axienet_ethtools_get_settings,
2965        .set_settings   = axienet_ethtools_set_settings,
2966        .get_drvinfo    = axienet_ethtools_get_drvinfo,
2967        .get_regs_len   = axienet_ethtools_get_regs_len,
2968        .get_regs       = axienet_ethtools_get_regs,
2969        .get_link       = ethtool_op_get_link,
2970        .get_pauseparam = axienet_ethtools_get_pauseparam,
2971        .set_pauseparam = axienet_ethtools_set_pauseparam,
2972        .get_coalesce   = axienet_ethtools_get_coalesce,
2973        .set_coalesce   = axienet_ethtools_set_coalesce,
2974#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
2975        .get_ts_info    = axienet_ethtools_get_ts_info,
2976#endif
2977        .get_link_ksettings = phy_ethtool_get_link_ksettings,
2978        .set_link_ksettings = phy_ethtool_set_link_ksettings,
2979#ifdef CONFIG_AXIENET_HAS_MCDMA
2980        .get_sset_count  = axienet_sset_count,
2981        .get_ethtool_stats = axienet_get_stats,
2982        .get_strings = axienet_strings,
2983#endif
2984};
2985
2986/**
2987 * axienet_mcdma_err_handler - Tasklet handler for Axi MCDMA Error
2988 * @data:       Pointer to the axienet_dma_q structure of the queue, cast to an unsigned long
2989 *
2990 * Resets the Axi MCDMA and Axi Ethernet devices, and reconfigures the
2991 * Tx/Rx BDs.
2992 */
2993static void __maybe_unused axienet_mcdma_err_handler(unsigned long data)
2994{
2995        u32 axienet_status;
2996        u32 cr, i, chan_en;
2997        int mdio_mcreg = 0;
2998        struct axienet_dma_q *q = (struct axienet_dma_q *)data;
2999        struct axienet_local *lp = q->lp;
3000        struct net_device *ndev = lp->ndev;
3001        struct aximcdma_bd *cur_p;
3002
3003        lp->axienet_config->setoptions(ndev, lp->options &
3004                                       ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
3005
3006        if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
3007                mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
3008                axienet_mdio_wait_until_ready(lp);
3009                /* Disable the MDIO interface until the Axi Ethernet reset
3010                 * has completed. An Axi Ethernet reset resets the complete
3011                 * core, including the MDIO block. If MDIO is not disabled
3012                 * when the reset is started, MDIO will be broken
3013                 * afterwards.
3014                 */
3015                axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
3016                            ~XAE_MDIO_MC_MDIOEN_MASK));
3017        }
3018
3019        __axienet_device_reset(q, XAXIDMA_TX_CR_OFFSET);
3020
3021        if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
3022                axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
3023                axienet_mdio_wait_until_ready(lp);
3024        }
3025
3026        for (i = 0; i < TX_BD_NUM; i++) {
3027                cur_p = &q->txq_bd_v[i];
3028                if (cur_p->phys)
3029                        dma_unmap_single(ndev->dev.parent, cur_p->phys,
3030                                         (cur_p->cntrl &
3031                                          XAXIDMA_BD_CTRL_LENGTH_MASK),
3032                                         DMA_TO_DEVICE);
3033                if (cur_p->tx_skb)
3034                        dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
3035                cur_p->phys = 0;
3036                cur_p->cntrl = 0;
3037                cur_p->status = 0;
3038                cur_p->app0 = 0;
3039                cur_p->app1 = 0;
3040                cur_p->app2 = 0;
3041                cur_p->app3 = 0;
3042                cur_p->app4 = 0;
3043                cur_p->sw_id_offset = 0;
3044                cur_p->tx_skb = 0;
3045        }
3046
3047        for (i = 0; i < RX_BD_NUM; i++) {
3048                cur_p = &q->rxq_bd_v[i];
3049                cur_p->status = 0;
3050                cur_p->app0 = 0;
3051                cur_p->app1 = 0;
3052                cur_p->app2 = 0;
3053                cur_p->app3 = 0;
3054                cur_p->app4 = 0;
3055        }
3056
3057        q->tx_bd_ci = 0;
3058        q->tx_bd_tail = 0;
3059        q->rx_bd_ci = 0;
3060
3061        /* Start updating the Rx channel control register */
3062        cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
3063                              q->rx_offset);
3064        /* Update the interrupt coalesce count */
3065        cr = ((cr & ~XMCDMA_COALESCE_MASK) |
3066              ((lp->coalesce_count_rx) << XMCDMA_COALESCE_SHIFT));
3067        /* Update the delay timer count */
3068        cr = ((cr & ~XMCDMA_DELAY_MASK) |
3069              (XAXIDMA_DFT_RX_WAITBOUND << XMCDMA_DELAY_SHIFT));
3070        /* Enable coalesce, delay timer and error interrupts */
3071        cr |= XMCDMA_IRQ_ALL_MASK;
3072        /* Write to the Rx channel control register */
3073        axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
3074                          q->rx_offset, cr);
3075
3076        /* Start updating the Tx channel control register */
3077        cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
3078        /* Update the interrupt coalesce count */
3079        cr = (((cr & ~XMCDMA_COALESCE_MASK)) |
3080              ((lp->coalesce_count_tx) << XMCDMA_COALESCE_SHIFT));
3081        /* Update the delay timer count */
3082        cr = (((cr & ~XMCDMA_DELAY_MASK)) |
3083              (XAXIDMA_DFT_TX_WAITBOUND << XMCDMA_DELAY_SHIFT));
3084        /* Enable coalesce, delay timer and error interrupts */
3085        cr |= XMCDMA_IRQ_ALL_MASK;
3086        /* Write to the Tx channel control register */
3087        axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id), cr);
3088
3089        /* Populate the tail pointer and bring the Rx Axi DMA engine out of
3090         * halted state. This will make the Rx side ready for reception.
3091         */
3092        axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id) +
3093                            q->rx_offset, q->rx_bd_p);
3094        cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET +  q->rx_offset);
3095        axienet_dma_out32(q, XMCDMA_CR_OFFSET +  q->rx_offset,
3096                          cr | XMCDMA_CR_RUNSTOP_MASK);
3097        cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) +
3098                                q->rx_offset);
3099        axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id) + q->rx_offset,
3100                          cr | XMCDMA_CR_RUNSTOP_MASK);
3101        axienet_dma_bdout(q, XMCDMA_CHAN_TAILDESC_OFFSET(q->chan_id) +
3102                            q->rx_offset, q->rx_bd_p + (sizeof(*q->rxq_bd_v) *
3103                            (RX_BD_NUM - 1)));
3104        chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET + q->rx_offset);
3105        chan_en |= (1 << (q->chan_id - 1));
3106        axienet_dma_out32(q, XMCDMA_CHEN_OFFSET + q->rx_offset, chan_en);
3107
3108        /* Write to the RS (Run-stop) bit in the Tx channel control register.
3109         * The Tx channel is now ready to run, but it will only start
3110         * transmitting after the tail pointer register is written.
3111         */
3112        axienet_dma_bdout(q, XMCDMA_CHAN_CURDESC_OFFSET(q->chan_id),
3113                          q->tx_bd_p);
3114        cr = axienet_dma_in32(q, XMCDMA_CR_OFFSET);
3115        axienet_dma_out32(q, XMCDMA_CR_OFFSET,
3116                          cr | XMCDMA_CR_RUNSTOP_MASK);
3117        cr = axienet_dma_in32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id));
3118        axienet_dma_out32(q, XMCDMA_CHAN_CR_OFFSET(q->chan_id),
3119                          cr | XMCDMA_CR_RUNSTOP_MASK);
3120        chan_en = axienet_dma_in32(q, XMCDMA_CHEN_OFFSET);
3121        chan_en |= (1 << (q->chan_id - 1));
3122        axienet_dma_out32(q, XMCDMA_CHEN_OFFSET, chan_en);
3123
3124        if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
3125                axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
3126                axienet_status &= ~XAE_RCW1_RX_MASK;
3127                axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
3128        }
3129
3130        if ((lp->axienet_config->mactype == XAXIENET_1G) && !lp->eth_hasnobuf) {
3131                axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
3132                if (axienet_status & XAE_INT_RXRJECT_MASK)
3133                        axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
3134        }
3135
3136        if (lp->axienet_config->mactype != XAXIENET_10G_25G)
3137                axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
3138
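            /* Sync the default options with the HW, but leave the receiver
             * and transmitter disabled; they are re-enabled by the final
             * setoptions() call below.
             */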
3139        lp->axienet_config->setoptions(ndev, lp->options &
3140                                       ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
3141        axienet_set_mac_address(ndev, NULL);
3142        axienet_set_multicast_list(ndev);
3143        lp->axienet_config->setoptions(ndev, lp->options);
3144}
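    /*
     * A minimal sketch (assumption: the registration happens elsewhere in
     * this driver, e.g. at open time) of how the handler above is wired
     * up as a per-queue tasklet, with the queue pointer passed through
     * the unsigned long cookie:
     *
     *   tasklet_init(&q->dma_err_tasklet, axienet_mcdma_err_handler,
     *                (unsigned long)q);
     *   ...
     *   tasklet_schedule(&q->dma_err_tasklet);   (from the DMA error IRQ)
     */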
3145
3146/**
3147 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
3148 * @data:       Pointer to the axienet_dma_q structure of the queue, cast to an unsigned long
3149 *
3150 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
3151 * Tx/Rx BDs.
3152 */
3153static void __maybe_unused axienet_dma_err_handler(unsigned long data)
3154{
3155        u32 axienet_status;
3156        u32 cr, i;
3157        int mdio_mcreg = 0;
3158        struct axienet_dma_q *q = (struct axienet_dma_q *)data;
3159        struct axienet_local *lp = q->lp;
3160        struct net_device *ndev = lp->ndev;
3161        struct axidma_bd *cur_p;
3162
3163        lp->axienet_config->setoptions(ndev, lp->options &
3164                                       ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
3165
3166        if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
3167                mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
3168                axienet_mdio_wait_until_ready(lp);
3169                /* Disable the MDIO interface until the Axi Ethernet reset
3170                 * has completed. An Axi Ethernet reset resets the complete
3171                 * core, including the MDIO block. If MDIO is not disabled
3172                 * when the reset is started, MDIO will be broken
3173                 * afterwards.
3174                 */
3175                axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
3176                            ~XAE_MDIO_MC_MDIOEN_MASK));
3177        }
3178
3179        __axienet_device_reset(q, XAXIDMA_TX_CR_OFFSET);
3180        __axienet_device_reset(q, XAXIDMA_RX_CR_OFFSET);
3181
3182        if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
3183                axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
3184                axienet_mdio_wait_until_ready(lp);
3185        }
3186
3187        for (i = 0; i < TX_BD_NUM; i++) {
3188                cur_p = &q->tx_bd_v[i];
3189                if (cur_p->phys)
3190                        dma_unmap_single(ndev->dev.parent, cur_p->phys,
3191                                         (cur_p->cntrl &
3192                                          XAXIDMA_BD_CTRL_LENGTH_MASK),
3193                                         DMA_TO_DEVICE);
3194                if (cur_p->tx_skb)
3195                        dev_kfree_skb_irq((struct sk_buff *)cur_p->tx_skb);
3196                cur_p->phys = 0;
3197                cur_p->cntrl = 0;
3198                cur_p->status = 0;
3199                cur_p->app0 = 0;
3200                cur_p->app1 = 0;
3201                cur_p->app2 = 0;
3202                cur_p->app3 = 0;
3203                cur_p->app4 = 0;
3204                cur_p->sw_id_offset = 0;
3205                cur_p->tx_skb = 0;
3206        }
3207
3208        for (i = 0; i < RX_BD_NUM; i++) {
3209                cur_p = &q->rx_bd_v[i];
3210                cur_p->status = 0;
3211                cur_p->app0 = 0;
3212                cur_p->app1 = 0;
3213                cur_p->app2 = 0;
3214                cur_p->app3 = 0;
3215                cur_p->app4 = 0;
3216        }
3217
3218        q->tx_bd_ci = 0;
3219        q->tx_bd_tail = 0;
3220        q->rx_bd_ci = 0;
3221
3222        /* Start updating the Rx channel control register */
3223        cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
3224        /* Update the interrupt coalesce count */
3225        cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
3226              (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
3227        /* Update the delay timer count */
3228        cr = ((cr & ~XAXIDMA_DELAY_MASK) |
3229              (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
3230        /* Enable coalesce, delay timer and error interrupts */
3231        cr |= XAXIDMA_IRQ_ALL_MASK;
3232        /* Finally write to the Rx channel control register */
3233        axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET, cr);
3234
3235        /* Start updating the Tx channel control register */
3236        cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
3237        /* Update the interrupt coalesce count */
3238        cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
3239              (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
3240        /* Update the delay timer count */
3241        cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
3242              (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
3243        /* Enable coalesce, delay timer and error interrupts */
3244        cr |= XAXIDMA_IRQ_ALL_MASK;
3245        /* Finally write to the Tx channel control register */
3246        axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET, cr);
3247
3248        /* Populate the tail pointer and bring the Rx Axi DMA engine out of
3249         * halted state. This will make the Rx side ready for reception.
3250         */
3251        axienet_dma_bdout(q, XAXIDMA_RX_CDESC_OFFSET, q->rx_bd_p);
3252        cr = axienet_dma_in32(q, XAXIDMA_RX_CR_OFFSET);
3253        axienet_dma_out32(q, XAXIDMA_RX_CR_OFFSET,
3254                          cr | XAXIDMA_CR_RUNSTOP_MASK);
3255        axienet_dma_bdout(q, XAXIDMA_RX_TDESC_OFFSET, q->rx_bd_p +
3256                          (sizeof(*q->rx_bd_v) * (RX_BD_NUM - 1)));
3257
3258        /* Write to the RS (Run-stop) bit in the Tx channel control register.
3259         * The Tx channel is now ready to run, but it will only start
3260         * transmitting after the tail pointer register is written.
3261         */
3262        axienet_dma_bdout(q, XAXIDMA_TX_CDESC_OFFSET, q->tx_bd_p);
3263        cr = axienet_dma_in32(q, XAXIDMA_TX_CR_OFFSET);
3264        axienet_dma_out32(q, XAXIDMA_TX_CR_OFFSET,
3265                          cr | XAXIDMA_CR_RUNSTOP_MASK);
3266
3267        if (lp->axienet_config->mactype != XAXIENET_10G_25G) {
3268                axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
3269                axienet_status &= ~XAE_RCW1_RX_MASK;
3270                axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
3271        }
3272
3273        if ((lp->axienet_config->mactype == XAXIENET_1G) && !lp->eth_hasnobuf) {
3274                axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
3275                if (axienet_status & XAE_INT_RXRJECT_MASK)
3276                        axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
3277        }
3278
3279        if (lp->axienet_config->mactype != XAXIENET_10G_25G)
3280                axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
3281
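            /* Sync the default options with the HW, but leave the receiver
             * and transmitter disabled; they are re-enabled by the final
             * setoptions() call below.
             */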
3282        lp->axienet_config->setoptions(ndev, lp->options &
3283                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
3284        axienet_set_mac_address(ndev, NULL);
3285        axienet_set_multicast_list(ndev);
3286        lp->axienet_config->setoptions(ndev, lp->options);
3287}
3288
3289static int __maybe_unused axienet_mcdma_probe(struct platform_device *pdev,
3290                                              struct axienet_local *lp,
3291                                              struct net_device *ndev)
3292{
3293        int i, ret = 0;
3294        struct axienet_dma_q *q;
3295        struct device_node *np;
3296        struct resource dmares;
3297        char dma_name[16];
3298        const char *str;
3299
3300        ret = of_property_count_strings(pdev->dev.of_node, "xlnx,channel-ids");
3301        if (ret < 0)
3302                return -EINVAL;
3303
3304        for_each_dma_queue(lp, i) {
3305                q = kzalloc(sizeof(*q), GFP_KERNEL);
                    if (!q)
                            return -ENOMEM;
3306
3307                /* parent */
3308                q->lp = lp;
3309                lp->dq[i] = q;
3310                ret = of_property_read_string_index(pdev->dev.of_node,
3311                                                    "xlnx,channel-ids", i,
3312                                                    &str);
                    if (ret < 0)
                            return ret;
3313                ret = kstrtou16(str, 16, &q->chan_id);
                    if (ret)
                            return ret;
3314                lp->qnum[i] = i;
3315                lp->chan_num[i] = q->chan_id;
3316        }
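            /* Illustrative device-tree fragment (assumed, not taken from a
             * real board file) matching the parsing above: one hex
             * channel-id string per queue.
             *
             *   xlnx,num-queues = <0x2>;
             *   xlnx,channel-ids = "1", "2";
             */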
3317
3318        np = of_parse_phandle(pdev->dev.of_node, "axistream-connected",
3319                              0);
3320        if (!np) {
3321                dev_err(&pdev->dev, "could not find DMA node\n");
3322                return -EINVAL;
3323        }
3324
3325        ret = of_address_to_resource(np, 0, &dmares);
3326        if (ret) {
3327                dev_err(&pdev->dev, "unable to get DMA resource\n");
3328                return ret;
3329        }
3330
3331        lp->mcdma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
3332        if (IS_ERR(lp->mcdma_regs)) {
3333                dev_err(&pdev->dev, "ioremap failed for the dma\n");
3334                ret = PTR_ERR(lp->mcdma_regs);
3335                return ret;
3336        }
3337
3338        /* Share the mapped MCDMA register space and decode the per-queue DMA IRQs */
3339        for_each_dma_queue(lp, i) {
3340                struct axienet_dma_q *q;
3341
3342                q = lp->dq[i];
3343
3344                q->dma_regs = lp->mcdma_regs;
3345                sprintf(dma_name, "dma%d_tx", i);
3346                q->tx_irq = platform_get_irq_byname(pdev, dma_name);
3347                sprintf(dma_name, "dma%d_rx", i);
3348                q->rx_irq = platform_get_irq_byname(pdev, dma_name);
3349                q->eth_hasdre = of_property_read_bool(np,
3350                                                      "xlnx,include-dre");
3351        }
3352        of_node_put(np);
3353
3354        for_each_dma_queue(lp, i) {
3355                struct axienet_dma_q *q = lp->dq[i];
3356
3357                spin_lock_init(&q->tx_lock);
3358                spin_lock_init(&q->rx_lock);
3359        }
3360
3361        for_each_dma_queue(lp, i) {
3362                netif_napi_add(ndev, &lp->napi[i], xaxienet_rx_poll,
3363                               XAXIENET_NAPI_WEIGHT);
3364        }
3365
3366        return 0;
3367}
3368
3369static int __maybe_unused axienet_dma_probe(struct platform_device *pdev,
3370                                            struct net_device *ndev)
3371{
3372        int i, ret;
3373        struct axienet_local *lp = netdev_priv(ndev);
3374        struct axienet_dma_q *q;
3375        struct device_node *np;
3376        struct resource dmares;
3377#ifdef CONFIG_XILINX_TSN
3378        char dma_name[10];
3379#endif
3380
3381        for_each_dma_queue(lp, i) {
3382                q = kzalloc(sizeof(*q), GFP_KERNEL);
                    if (!q)
                            return -ENOMEM;
3383
3384                /* parent */
3385                q->lp = lp;
3386
3387                lp->dq[i] = q;
3388        }
3389
3390        /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
3391        /* TODO handle error ret */
3392        for_each_dma_queue(lp, i) {
3393                q = lp->dq[i];
3394
3395                np = of_parse_phandle(pdev->dev.of_node, "axistream-connected",
3396                                      i);
3397                if (np) {
3398                        ret = of_address_to_resource(np, 0, &dmares);
3399                        if (ret >= 0)
3400                                q->dma_regs = devm_ioremap_resource(&pdev->dev,
3401                                                                &dmares);
3402                        else
3403                                return -ENODEV;
3404                        q->eth_hasdre = of_property_read_bool(np,
3405                                                        "xlnx,include-dre");
3406                } else {
3407                        return -EINVAL;
3408                }
3409        }
3410
3411#ifdef CONFIG_XILINX_TSN
3412        if (lp->is_tsn) {
3413                for_each_dma_queue(lp, i) {
3414                        sprintf(dma_name, "dma%d_tx", i);
3415                        lp->dq[i]->tx_irq = platform_get_irq_byname(pdev,
3416                                                                    dma_name);
3417                        sprintf(dma_name, "dma%d_rx", i);
3418                        lp->dq[i]->rx_irq = platform_get_irq_byname(pdev,
3419                                                                    dma_name);
3420                        pr_info("lp->dq[%d]->tx_irq  %d\n", i,
3421                                lp->dq[i]->tx_irq);
3422                        pr_info("lp->dq[%d]->rx_irq  %d\n", i,
3423                                lp->dq[i]->rx_irq);
3424                }
3425        } else {
3426#endif /* This should be removed once the axienet device-tree IRQs comply with the DMA names */
3427                for_each_dma_queue(lp, i) {
3428                        lp->dq[i]->tx_irq = irq_of_parse_and_map(np, 0);
3429                        lp->dq[i]->rx_irq = irq_of_parse_and_map(np, 1);
3430                }
3431#ifdef CONFIG_XILINX_TSN
3432        }
3433#endif
3434
3435        of_node_put(np);
3436
3437        for_each_dma_queue(lp, i) {
3438                struct axienet_dma_q *q = lp->dq[i];
3439
3440                spin_lock_init(&q->tx_lock);
3441                spin_lock_init(&q->rx_lock);
3442        }
3443
3444        for_each_dma_queue(lp, i) {
3445                netif_napi_add(ndev, &lp->napi[i], xaxienet_rx_poll,
3446                               XAXIENET_NAPI_WEIGHT);
3447        }
3448
3449        return 0;
3450}
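    /*
     * Illustrative device-tree fragment (assumed bindings, shown only to
     * make the probing above concrete): "axistream-connected" points at
     * the AXI DMA node(s); for TSN the per-queue IRQs are looked up by
     * the dma<N>_tx/dma<N>_rx names, otherwise they are taken directly
     * from the DMA node.
     *
     *   ethernet@40c00000 {
     *           axistream-connected = <&axi_dma_0>;
     *           interrupt-names = "dma0_tx", "dma0_rx";
     *           ...
     *   };
     */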
3451
3452static const struct axienet_config axienet_1g_config = {
3453        .mactype = XAXIENET_1G,
3454        .setoptions = axienet_setoptions,
3455        .tx_ptplen = XAE_TX_PTP_LEN,
3456};
3457
3458static const struct axienet_config axienet_2_5g_config = {
3459        .mactype = XAXIENET_2_5G,
3460        .setoptions = axienet_setoptions,
3461        .tx_ptplen = XAE_TX_PTP_LEN,
3462};
3463
3464static const struct axienet_config axienet_10g_config = {
3465        .mactype = XAXIENET_LEGACY_10G,
3466        .setoptions = axienet_setoptions,
3467        .tx_ptplen = XAE_TX_PTP_LEN,
3468};
3469
3470static const struct axienet_config axienet_10g25g_config = {
3471        .mactype = XAXIENET_10G_25G,
3472        .setoptions = xxvenet_setoptions,
3473        .tx_ptplen = XXV_TX_PTP_LEN,
3474};
3475
3476/* Match table for of_platform binding */
3477static const struct of_device_id axienet_of_match[] = {
3478        { .compatible = "xlnx,axi-ethernet-1.00.a", .data = &axienet_1g_config},
3479        { .compatible = "xlnx,axi-ethernet-1.01.a", .data = &axienet_1g_config},
3480        { .compatible = "xlnx,axi-ethernet-2.01.a", .data = &axienet_1g_config},
3481        { .compatible = "xlnx,axi-2_5-gig-ethernet-1.0",
3482                                                .data = &axienet_2_5g_config},
3483        { .compatible = "xlnx,ten-gig-eth-mac", .data = &axienet_10g_config},
3484        { .compatible = "xlnx,xxv-ethernet-1.0",
3485                                                .data = &axienet_10g25g_config},
3486        { .compatible = "xlnx,tsn-ethernet-1.00.a", .data = &axienet_1g_config},
3487        {},
3488};
3489
3490MODULE_DEVICE_TABLE(of, axienet_of_match);
3491
3492#ifdef CONFIG_AXIENET_HAS_MCDMA
3493static ssize_t rxch_obs1_show(struct device *dev,
3494                              struct device_attribute *attr, char *buf)
3495{
3496        struct net_device *ndev = dev_get_drvdata(dev);
3497        struct axienet_local *lp = netdev_priv(ndev);
3498        struct axienet_dma_q *q = lp->dq[0];
3499        u32 reg;
3500
3501        reg = axienet_dma_in32(q, XMCDMA_CHOBS1_OFFSET + q->rx_offset);
3502
3503        return sprintf(buf, "Ingress Channel Observer 1 Contents is 0x%x\n",
3504                       reg);
3505}
3506
3507static ssize_t rxch_obs2_show(struct device *dev,
3508                              struct device_attribute *attr, char *buf)
3509{
3510        struct net_device *ndev = dev_get_drvdata(dev);
3511        struct axienet_local *lp = netdev_priv(ndev);
3512        struct axienet_dma_q *q = lp->dq[0];
3513        u32 reg;
3514
3515        reg = axienet_dma_in32(q, XMCDMA_CHOBS2_OFFSET + q->rx_offset);
3516
3517        return sprintf(buf, "Ingress Channel Observer 2 Contents is 0x%x\n",
3518                       reg);
3519}
3520
3521static ssize_t rxch_obs3_show(struct device *dev,
3522                              struct device_attribute *attr, char *buf)
3523{
3524        struct net_device *ndev = dev_get_drvdata(dev);
3525        struct axienet_local *lp = netdev_priv(ndev);
3526        struct axienet_dma_q *q = lp->dq[0];
3527        u32 reg;
3528
3529        reg = axienet_dma_in32(q, XMCDMA_CHOBS3_OFFSET + q->rx_offset);
3530
3531        return sprintf(buf, "Ingress Channel Observer 3 Contents is 0x%x\n",
3532                       reg);
3533}
3534
3535static ssize_t rxch_obs4_show(struct device *dev,
3536                              struct device_attribute *attr, char *buf)
3537{
3538        struct net_device *ndev = dev_get_drvdata(dev);
3539        struct axienet_local *lp = netdev_priv(ndev);
3540        struct axienet_dma_q *q = lp->dq[0];
3541        u32 reg;
3542
3543        reg = axienet_dma_in32(q, XMCDMA_CHOBS4_OFFSET + q->rx_offset);
3544
3545        return sprintf(buf, "Ingress Channel Observer 4 Contents is 0x%x\n",
3546                       reg);
3547}
3548
3549static ssize_t rxch_obs5_show(struct device *dev,
3550                              struct device_attribute *attr, char *buf)
3551{
3552        struct net_device *ndev = dev_get_drvdata(dev);
3553        struct axienet_local *lp = netdev_priv(ndev);
3554        struct axienet_dma_q *q = lp->dq[0];
3555        u32 reg;
3556
3557        reg = axienet_dma_in32(q, XMCDMA_CHOBS5_OFFSET + q->rx_offset);
3558
3559        return sprintf(buf, "Ingress Channel Observer 5 Contents is 0x%x\n",
3560                       reg);
3561}
3562
3563static ssize_t rxch_obs6_show(struct device *dev,
3564                              struct device_attribute *attr, char *buf)
3565{
3566        struct net_device *ndev = dev_get_drvdata(dev);
3567        struct axienet_local *lp = netdev_priv(ndev);
3568        struct axienet_dma_q *q = lp->dq[0];
3569        u32 reg;
3570
3571        reg = axienet_dma_in32(q, XMCDMA_CHOBS6_OFFSET + q->rx_offset);
3572
3573        return sprintf(buf, "Ingress Channel Observer 6 Contents is 0x%x\n",
3574                       reg);
3575}
3576
3577static ssize_t txch_obs1_show(struct device *dev,
3578                              struct device_attribute *attr, char *buf)
3579{
3580        struct net_device *ndev = dev_get_drvdata(dev);
3581        struct axienet_local *lp = netdev_priv(ndev);
3582        struct axienet_dma_q *q = lp->dq[0];
3583        u32 reg;
3584
3585        reg = axienet_dma_in32(q, XMCDMA_CHOBS1_OFFSET);
3586
3587        return sprintf(buf, "Egress Channel Observer 1 Contents is 0x%x\n",
3588                       reg);
3589}
3590
3591static ssize_t txch_obs2_show(struct device *dev,
3592                              struct device_attribute *attr, char *buf)
3593{
3594        struct net_device *ndev = dev_get_drvdata(dev);
3595        struct axienet_local *lp = netdev_priv(ndev);
3596        struct axienet_dma_q *q = lp->dq[0];
3597        u32 reg;
3598
3599        reg = axienet_dma_in32(q, XMCDMA_CHOBS2_OFFSET);
3600
3601        return sprintf(buf, "Egress Channel Observer 2 Contents is 0x%x\n",
3602                       reg);
3603}
3604
3605static ssize_t txch_obs3_show(struct device *dev,
3606                              struct device_attribute *attr, char *buf)
3607{
3608        struct net_device *ndev = dev_get_drvdata(dev);
3609        struct axienet_local *lp = netdev_priv(ndev);
3610        struct axienet_dma_q *q = lp->dq[0];
3611        u32 reg;
3612
3613        reg = axienet_dma_in32(q, XMCDMA_CHOBS3_OFFSET);
3614
3615        return sprintf(buf, "Egress Channel Observer 3 Contents is 0x%x\n",
3616                       reg);
3617}
3618
3619static ssize_t txch_obs4_show(struct device *dev,
3620                              struct device_attribute *attr, char *buf)
3621{
3622        struct net_device *ndev = dev_get_drvdata(dev);
3623        struct axienet_local *lp = netdev_priv(ndev);
3624        struct axienet_dma_q *q = lp->dq[0];
3625        u32 reg;
3626
3627        reg = axienet_dma_in32(q, XMCDMA_CHOBS4_OFFSET);
3628
3629        return sprintf(buf, "Egress Channel Observer 4 Contents is 0x%x\n",
3630                       reg);
3631}
3632
3633static ssize_t txch_obs5_show(struct device *dev,
3634                              struct device_attribute *attr, char *buf)
3635{
3636        struct net_device *ndev = dev_get_drvdata(dev);
3637        struct axienet_local *lp = netdev_priv(ndev);
3638        struct axienet_dma_q *q = lp->dq[0];
3639        u32 reg;
3640
3641        reg = axienet_dma_in32(q, XMCDMA_CHOBS5_OFFSET);
3642
3643        return sprintf(buf, "Egress Channel Observer 5 Contents is 0x%x\n",
3644                       reg);
3645}
3646
3647static ssize_t txch_obs6_show(struct device *dev,
3648                              struct device_attribute *attr, char *buf)
3649{
3650        struct net_device *ndev = dev_get_drvdata(dev);
3651        struct axienet_local *lp = netdev_priv(ndev);
3652        struct axienet_dma_q *q = lp->dq[0];
3653        u32 reg;
3654
3655        reg = axienet_dma_in32(q, XMCDMA_CHOBS6_OFFSET);
3656
3657        return sprintf(buf, "Egress Channel Observer 6 Contents is 0x%x\n",
3658                       reg);
3659}
3660
3661static ssize_t chan_weight_show(struct device *dev,
3662                                struct device_attribute *attr, char *buf)
3663{
3664        struct net_device *ndev = dev_get_drvdata(dev);
3665        struct axienet_local *lp = netdev_priv(ndev);
3666
3667        return sprintf(buf, "chan_id is %d and weight is %d\n",
3668                       lp->chan_id, lp->weight);
3669}
3670
3671static ssize_t chan_weight_store(struct device *dev,
3672                                 struct device_attribute *attr,
3673                                 const char *buf, size_t count)
3674{
3675        struct net_device *ndev = dev_get_drvdata(dev);
3676        struct axienet_local *lp = netdev_priv(ndev);
3677        struct axienet_dma_q *q = lp->dq[0];
3678        int ret;
3679        u16 flags, chan_id;
3680        u32 val;
3681
3682        ret = kstrtou16(buf, 16, &flags);
3683        if (ret)
3684                return ret;
3685
3686        lp->chan_id = (flags & 0xF0) >> 4;
3687        lp->weight = flags & 0x0F;
3688
3689        if (lp->chan_id < 8)
3690                val = axienet_dma_in32(q, XMCDMA_TXWEIGHT0_OFFSET);
3691        else
3692                val = axienet_dma_in32(q, XMCDMA_TXWEIGHT1_OFFSET);
3693
3694        if (lp->chan_id > 7)
3695                chan_id = lp->chan_id - 8;
3696        else
3697                chan_id = lp->chan_id;
3698
3699        val &= ~XMCDMA_TXWEIGHT_CH_MASK(chan_id);
3700        val |= lp->weight << XMCDMA_TXWEIGHT_CH_SHIFT(chan_id);
3701
3702        if (lp->chan_id < 8)
3703                axienet_dma_out32(q, XMCDMA_TXWEIGHT0_OFFSET, val);
3704        else
3705                axienet_dma_out32(q, XMCDMA_TXWEIGHT1_OFFSET, val);
3706
3707        return count;
3708}
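    /*
     * Illustrative usage (sysfs path assumed): the written hex value
     * packs the channel id into bits [7:4] and the weight into bits
     * [3:0], so
     *
     *   # echo 0x25 > /sys/devices/.../chan_weight
     *
     * selects channel 2 with weight 5, and a subsequent read reports
     * both fields back.
     */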
3709
3710static DEVICE_ATTR_RW(chan_weight);
3711static DEVICE_ATTR_RO(rxch_obs1);
3712static DEVICE_ATTR_RO(rxch_obs2);
3713static DEVICE_ATTR_RO(rxch_obs3);
3714static DEVICE_ATTR_RO(rxch_obs4);
3715static DEVICE_ATTR_RO(rxch_obs5);
3716static DEVICE_ATTR_RO(rxch_obs6);
3717static DEVICE_ATTR_RO(txch_obs1);
3718static DEVICE_ATTR_RO(txch_obs2);
3719static DEVICE_ATTR_RO(txch_obs3);
3720static DEVICE_ATTR_RO(txch_obs4);
3721static DEVICE_ATTR_RO(txch_obs5);
3722static DEVICE_ATTR_RO(txch_obs6);
3723static const struct attribute *mcdma_attrs[] = {
3724        &dev_attr_chan_weight.attr,
3725        &dev_attr_rxch_obs1.attr,
3726        &dev_attr_rxch_obs2.attr,
3727        &dev_attr_rxch_obs3.attr,
3728        &dev_attr_rxch_obs4.attr,
3729        &dev_attr_rxch_obs5.attr,
3730        &dev_attr_rxch_obs6.attr,
3731        &dev_attr_txch_obs1.attr,
3732        &dev_attr_txch_obs2.attr,
3733        &dev_attr_txch_obs3.attr,
3734        &dev_attr_txch_obs4.attr,
3735        &dev_attr_txch_obs5.attr,
3736        &dev_attr_txch_obs6.attr,
3737        NULL,
3738};
3739
3740static const struct attribute_group mcdma_attributes = {
3741        .attrs = (struct attribute **)mcdma_attrs,
3742};
3743#endif
3744
3745/**
3746 * axienet_probe - Axi Ethernet probe function.
3747 * @pdev:       Pointer to platform device structure.
3748 *
3749 * Return: 0, on success
3750 *          Non-zero error value on failure.
3751 *
3752 * This is the probe routine for the Axi Ethernet driver. It is called before
3753 * any other driver routines are invoked. It allocates and sets up the
3754 * Ethernet device, parses the device tree to populate the fields of
3755 * axienet_local, and registers the Ethernet device.
3756 */
3757static int axienet_probe(struct platform_device *pdev)
3758{
3759        int ret = 0;
3760#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
3761        struct device_node *np;
3762#endif
3763        struct axienet_local *lp;
3764        struct net_device *ndev;
3765        u8 mac_addr[6];
3766        struct resource *ethres;
3767        u32 value, num_queues;
3768        bool slave = false;
3769
3770        ret = of_property_read_u32(pdev->dev.of_node, "xlnx,num-queues",
3771                                   &num_queues);
3772        if (ret)
3773                num_queues = XAE_MAX_QUEUES;
3774
3775        ndev = alloc_etherdev_mq(sizeof(*lp), num_queues);
3776        if (!ndev)
3777                return -ENOMEM;
3778
3779        platform_set_drvdata(pdev, ndev);
3780
3781        SET_NETDEV_DEV(ndev, &pdev->dev);
3782        ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
3783        ndev->features = NETIF_F_SG;
3784        ndev->netdev_ops = &axienet_netdev_ops;
3785        ndev->ethtool_ops = &axienet_ethtool_ops;
3786
3787        lp = netdev_priv(ndev);
3788        lp->ndev = ndev;
3789        lp->dev = &pdev->dev;
3790        lp->options = XAE_OPTION_DEFAULTS;
3791        lp->num_queues = num_queues;
3792        lp->is_tsn = of_property_read_bool(pdev->dev.of_node, "xlnx,tsn");
3793        /* Map device registers */
3794        ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3795        lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
3796
3797        if (IS_ERR(lp->regs)) {
3798                ret = PTR_ERR(lp->regs);
3799                goto free_netdev;
3800        }
3801
3802#ifdef CONFIG_XILINX_TSN
3803        of_property_read_u32(pdev->dev.of_node, "xlnx,num-queue", &lp->num_q);
3804        pr_info("Number of TSN priority queues: %d\n", lp->num_q);
3805
3806        slave = of_property_read_bool(pdev->dev.of_node,
3807                                      "xlnx,tsn-slave");
3808        if (slave)
3809                lp->temac_no = XAE_TEMAC2;
3810        else
3811                lp->temac_no = XAE_TEMAC1;
3812#endif
3813
3814        /* Setup checksum offload, but default to off if not specified */
3815        lp->features = 0;
3816
3817        if (pdev->dev.of_node) {
3818                const struct of_device_id *match;
3819
3820                match = of_match_node(axienet_of_match, pdev->dev.of_node);
3821                if (match && match->data)
3822                        lp->axienet_config = match->data;
3823        }
3824
3825        ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
3826        if (!ret) {
3827                dev_info(&pdev->dev, "TX_CSUM %d\n", value);
3828
3829                switch (value) {
3830                case 1:
3831                        lp->csum_offload_on_tx_path =
3832                                XAE_FEATURE_PARTIAL_TX_CSUM;
3833                        lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
3834                        /* Can checksum TCP/UDP over IPv4. */
3835                        ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3836                        break;
3837                case 2:
3838                        lp->csum_offload_on_tx_path =
3839                                XAE_FEATURE_FULL_TX_CSUM;
3840                        lp->features |= XAE_FEATURE_FULL_TX_CSUM;
3841                        /* Can checksum TCP/UDP over IPv4. */
3842                        ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3843                        break;
3844                default:
3845                        lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
3846                }
3847        }
3848        ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
3849        if (!ret) {
3850                dev_info(&pdev->dev, "RX_CSUM %d\n", value);
3851
3852                switch (value) {
3853                case 1:
3854                        lp->csum_offload_on_rx_path =
3855                                XAE_FEATURE_PARTIAL_RX_CSUM;
3856                        lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
3857                        break;
3858                case 2:
3859                        lp->csum_offload_on_rx_path =
3860                                XAE_FEATURE_FULL_RX_CSUM;
3861                        lp->features |= XAE_FEATURE_FULL_RX_CSUM;
3862                        break;
3863                default:
3864                        lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
3865                }
3866        }
3867        /* To support jumbo frames, the Axi Ethernet hardware must have
3868         * sufficient Rx/Tx memory; only when enough memory is present can
3869         * the jumbo option be enabled.
3870         * Here we read the amount of Rx memory configured in the hardware
3871         * from the device tree and set the flags accordingly.
3872         */
3873        of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
3874
3875        /* The phy_type is optional. When it is not specified it must not
3876         * take a value that alters the driver behaviour, so default it to
3877         * an invalid value.
3878         */
3879        lp->phy_type = ~0;
3880        of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);
3881
3882        lp->eth_hasnobuf = of_property_read_bool(pdev->dev.of_node,
3883                                                 "xlnx,eth-hasnobuf");
3884        lp->eth_hasptp = of_property_read_bool(pdev->dev.of_node,
3885                                               "xlnx,eth-hasptp");
3886
3887        if ((lp->axienet_config->mactype == XAXIENET_1G) && !lp->eth_hasnobuf)
3888                lp->eth_irq = platform_get_irq(pdev, 0);
3889
3890#ifdef CONFIG_XILINX_AXI_EMAC_HWTSTAMP
3891        if (!lp->is_tsn) {
3892                struct resource txtsres, rxtsres;
3893
3894                /* Find AXI Stream FIFO */
3895                np = of_parse_phandle(pdev->dev.of_node, "axififo-connected",
3896                                      0);
3897                if (!np) {
3898                        dev_err(&pdev->dev, "could not find TX Timestamp FIFO\n");
3899                        ret = -ENODEV;
3900                        goto free_netdev;
3901                }
3902
3903                ret = of_address_to_resource(np, 0, &txtsres);
3904                if (ret) {
3905                        dev_err(&pdev->dev,
3906                                "unable to get Tx Timestamp resource\n");
3907                        goto free_netdev;
3908                }
3909
3910                lp->tx_ts_regs = devm_ioremap_resource(&pdev->dev, &txtsres);
3911                if (IS_ERR(lp->tx_ts_regs)) {
3912                        dev_err(&pdev->dev, "could not map Tx Timestamp regs\n");
3913                        ret = PTR_ERR(lp->tx_ts_regs);
3914                        goto free_netdev;
3915                }
3916
3917                if (lp->axienet_config->mactype == XAXIENET_10G_25G) {
3918                        np = of_parse_phandle(pdev->dev.of_node,
3919                                              "xlnx,rxtsfifo", 0);
3920                        if (!np) {
3921                                dev_err(&pdev->dev,
3922                                        "couldn't find rx-timestamp FIFO\n");
3923                                ret = -ENODEV;
3924                                goto free_netdev;
3925                        }
3926
3927                        ret = of_address_to_resource(np, 0, &rxtsres);
3928                        if (ret) {
3929                                dev_err(&pdev->dev,
3930                                        "unable to get rx-timestamp resource\n");
3931                                goto free_netdev;
3932                        }
3933
3934                        lp->rx_ts_regs = devm_ioremap_resource(&pdev->dev,
3935                                                                &rxtsres);
3936                        if (IS_ERR(lp->rx_ts_regs)) {
3937                                dev_err(&pdev->dev,
3938                                        "couldn't map rx-timestamp regs\n");
3939                                ret = PTR_ERR(lp->rx_ts_regs);
3940                                goto free_netdev;
3941                        }
3942                        lp->tx_ptpheader = devm_kzalloc(&pdev->dev,
3943                                                XXVENET_TS_HEADER_LEN,
3944                                                GFP_KERNEL);
                            if (!lp->tx_ptpheader) {
                                    ret = -ENOMEM;
                                    goto free_netdev;
                            }
3945                }
3946
3947                of_node_put(np);
3948        }
3949#endif
3950        if (!slave) {
3951#ifdef CONFIG_AXIENET_HAS_MCDMA
3952                ret = axienet_mcdma_probe(pdev, lp, ndev);
3953#else
3954                ret = axienet_dma_probe(pdev, ndev);
3955#endif
3956                if (ret) {
3957                        pr_err("Getting DMA resource failed\n");
3958                        goto free_netdev;
3959                }
3960        }
3961
3962        lp->dma_clk = devm_clk_get(&pdev->dev, "dma_clk");
3963        if (IS_ERR(lp->dma_clk)) {
3964                if (PTR_ERR(lp->dma_clk) != -ENOENT) {
3965                        ret = PTR_ERR(lp->dma_clk);
3966                        goto free_netdev;
3967                }
3968
3969                /* Clock framework support is optional; continue
3970                 * anyway if we don't find a matching clock.
3971                 */
3972                lp->dma_clk = NULL;
3973        }
3974
3975        ret = clk_prepare_enable(lp->dma_clk);
3976        if (ret) {
3977                dev_err(&pdev->dev, "Unable to enable dma clock.\n");
3978                goto free_netdev;
3979        }
3980
3981        lp->eth_clk = devm_clk_get(&pdev->dev, "ethernet_clk");
3982        if (IS_ERR(lp->eth_clk)) {
3983                if (PTR_ERR(lp->eth_clk) != -ENOENT) {
3984                        ret = PTR_ERR(lp->eth_clk);
3985                        goto err_disable_dmaclk;
3986                }
3987
3988                /* Clock framework support is optional; continue
3989                 * anyway if we don't find a matching clock.
3990                 */
3991                lp->eth_clk = NULL;
3992        }
3993
3994        ret = clk_prepare_enable(lp->eth_clk);
3995        if (ret) {
3996                dev_err(&pdev->dev, "Unable to enable eth clock.\n");
3997                goto err_disable_dmaclk;
3998        }
3999
4000        /* Retrieve the MAC address */
4001        ret = of_property_read_u8_array(pdev->dev.of_node,
4002                                        "local-mac-address", mac_addr, 6);
4003        if (ret) {
4004                dev_err(&pdev->dev, "could not find MAC address\n");
4005                goto err_disable_ethclk;
4006        }
4007        axienet_set_mac_address(ndev, (void *)mac_addr);
4008
4009        lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
4010        lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
4011
4012        ret = of_get_phy_mode(pdev->dev.of_node);
4013        if (ret < 0)
4014                dev_warn(&pdev->dev, "couldn't find phy i/f\n");
4015        lp->phy_interface = ret;
4016        if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
4017                lp->phy_flags = XAE_PHY_TYPE_1000BASE_X;
4018
4019        lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
4020        if (lp->phy_node) {
4021                ret = axienet_mdio_setup(lp, pdev->dev.of_node);
4022                if (ret)
4023                        dev_warn(&pdev->dev, "error registering MDIO bus\n");
4024        }
4025
4026#ifdef CONFIG_AXIENET_HAS_MCDMA
4027        /* Create sysfs file entries for the device */
4028        ret = sysfs_create_group(&lp->dev->kobj, &mcdma_attributes);
4029        if (ret < 0) {
4030                dev_err(lp->dev, "unable to create sysfs entries\n");
4031                goto err_disable_ethclk;
4032        }
4033#endif
4034
4035        ret = register_netdev(lp->ndev);
4036        if (ret) {
4037                dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
4038                axienet_mdio_teardown(lp);
4039                goto err_disable_ethclk;
4040        }
4041
4042#ifdef CONFIG_XILINX_TSN_PTP
4043        if (lp->is_tsn) {
4044                lp->ptp_rx_irq = platform_get_irq_byname(pdev, "ptp_rx");
4045
4046                lp->ptp_tx_irq = platform_get_irq_byname(pdev, "ptp_tx");
4047
4048                lp->qbv_irq = platform_get_irq_byname(pdev, "qbv_irq");
4049
4050                pr_debug("ptp RX irq: %d\n", lp->ptp_rx_irq);
4051                pr_debug("ptp TX irq: %d\n", lp->ptp_tx_irq);
4052                pr_debug("qbv_irq: %d\n", lp->qbv_irq);
4053
4054                spin_lock_init(&lp->ptp_tx_lock);
4055
4056                if (lp->temac_no == XAE_TEMAC1) {
4057                        axienet_ptp_timer_probe(
4058                                 (lp->regs + XAE_RTC_OFFSET), pdev);
4059
4060                        /* enable VLAN */
4061                        lp->options |= XAE_OPTION_VLAN;
4062                        axienet_setoptions(lp->ndev, lp->options);
4063#ifdef CONFIG_XILINX_TSN_QBV
4064                        axienet_qbv_init(ndev);
4065#endif
4066                }
4067        }
4068#endif
4069        return 0;
4070
4071err_disable_dmaclk:
4072        clk_disable_unprepare(lp->dma_clk);
4073err_disable_ethclk:
4074        clk_disable_unprepare(lp->eth_clk);
4075free_netdev:
4076        free_netdev(ndev);
4077
4078        return ret;
4079}
4080
4081static int axienet_remove(struct platform_device *pdev)
4082{
4083        struct net_device *ndev = platform_get_drvdata(pdev);
4084        struct axienet_local *lp = netdev_priv(ndev);
4085        int i;
4086
4087        axienet_mdio_teardown(lp);
4088
4089#ifdef CONFIG_XILINX_TSN_PTP
4090        axienet_ptp_timer_remove(lp->timer_priv);
4091#ifdef CONFIG_XILINX_TSN_QBV
4092        axienet_qbv_remove(ndev);
4093#endif
4094#endif
4095        if (!lp->is_tsn || lp->temac_no == XAE_TEMAC1) {
4096                for_each_dma_queue(lp, i)
4097                        netif_napi_del(&lp->napi[i]);
4098        }
4099        unregister_netdev(ndev);
4100        clk_disable_unprepare(lp->eth_clk);
4101        clk_disable_unprepare(lp->dma_clk);
4102
4103#ifdef CONFIG_AXIENET_HAS_MCDMA
4104        sysfs_remove_group(&lp->dev->kobj, &mcdma_attributes);
4105#endif
4106        of_node_put(lp->phy_node);
4107        lp->phy_node = NULL;
4108
4109        free_netdev(ndev);
4110
4111        return 0;
4112}
4113
4114static struct platform_driver axienet_driver = {
4115        .probe = axienet_probe,
4116        .remove = axienet_remove,
4117        .driver = {
4118                 .name = "xilinx_axienet",
4119                 .of_match_table = axienet_of_match,
4120        },
4121};
4122
4123module_platform_driver(axienet_driver);
4124
4125MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
4126MODULE_AUTHOR("Xilinx");
4127MODULE_LICENSE("GPL");
4128