linux/drivers/net/ethernet/freescale/gianfar_ethtool.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  drivers/net/ethernet/freescale/gianfar_ethtool.c
   4 *
   5 *  Gianfar Ethernet Driver
   6 *  Ethtool support for Gianfar Enet
   7 *  Based on e1000 ethtool support
   8 *
   9 *  Author: Andy Fleming
  10 *  Maintainer: Kumar Gala
  11 *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  12 *
  13 *  Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
  14 */
  15
  16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  17
  18#include <linux/kernel.h>
  19#include <linux/string.h>
  20#include <linux/errno.h>
  21#include <linux/interrupt.h>
  22#include <linux/delay.h>
  23#include <linux/netdevice.h>
  24#include <linux/etherdevice.h>
  25#include <linux/net_tstamp.h>
  26#include <linux/skbuff.h>
  27#include <linux/spinlock.h>
  28#include <linux/mm.h>
  29
  30#include <asm/io.h>
  31#include <asm/irq.h>
  32#include <linux/uaccess.h>
  33#include <linux/module.h>
  34#include <linux/crc32.h>
  35#include <asm/types.h>
  36#include <linux/ethtool.h>
  37#include <linux/mii.h>
  38#include <linux/phy.h>
  39#include <linux/sort.h>
  40#include <linux/if_vlan.h>
  41#include <linux/of_platform.h>
  42#include <linux/fsl/ptp_qoriq.h>
  43
  44#include "gianfar.h"
  45
  46#define GFAR_MAX_COAL_USECS 0xffff
  47#define GFAR_MAX_COAL_FRAMES 0xff
  48
  49static const char stat_gstrings[][ETH_GSTRING_LEN] = {
  50        /* extra stats */
  51        "rx-allocation-errors",
  52        "rx-large-frame-errors",
  53        "rx-short-frame-errors",
  54        "rx-non-octet-errors",
  55        "rx-crc-errors",
  56        "rx-overrun-errors",
  57        "rx-busy-errors",
  58        "rx-babbling-errors",
  59        "rx-truncated-frames",
  60        "ethernet-bus-error",
  61        "tx-babbling-errors",
  62        "tx-underrun-errors",
  63        "tx-timeout-errors",
  64        /* rmon stats */
  65        "tx-rx-64-frames",
  66        "tx-rx-65-127-frames",
  67        "tx-rx-128-255-frames",
  68        "tx-rx-256-511-frames",
  69        "tx-rx-512-1023-frames",
  70        "tx-rx-1024-1518-frames",
  71        "tx-rx-1519-1522-good-vlan",
  72        "rx-bytes",
  73        "rx-packets",
  74        "rx-fcs-errors",
  75        "receive-multicast-packet",
  76        "receive-broadcast-packet",
  77        "rx-control-frame-packets",
  78        "rx-pause-frame-packets",
  79        "rx-unknown-op-code",
  80        "rx-alignment-error",
  81        "rx-frame-length-error",
  82        "rx-code-error",
  83        "rx-carrier-sense-error",
  84        "rx-undersize-packets",
  85        "rx-oversize-packets",
  86        "rx-fragmented-frames",
  87        "rx-jabber-frames",
  88        "rx-dropped-frames",
  89        "tx-byte-counter",
  90        "tx-packets",
  91        "tx-multicast-packets",
  92        "tx-broadcast-packets",
  93        "tx-pause-control-frames",
  94        "tx-deferral-packets",
  95        "tx-excessive-deferral-packets",
  96        "tx-single-collision-packets",
  97        "tx-multiple-collision-packets",
  98        "tx-late-collision-packets",
  99        "tx-excessive-collision-packets",
 100        "tx-total-collision",
 101        "reserved",
 102        "tx-dropped-frames",
 103        "tx-jabber-frames",
 104        "tx-fcs-errors",
 105        "tx-control-frames",
 106        "tx-oversize-frames",
 107        "tx-undersize-frames",
 108        "tx-fragmented-frames",
 109};
 110
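    /* Note (illustrative usage): these are the counter names reported to
     * userspace by e.g. "ethtool -S eth0"; the RMON block of counters is
     * only exposed when the hardware advertises FSL_GIANFAR_DEV_HAS_RMON.
     */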
 111/* Fill in a buffer with the strings which correspond to the
 112 * stats */
 113static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
 114{
 115        struct gfar_private *priv = netdev_priv(dev);
 116
 117        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
 118                memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
 119        else
 120                memcpy(buf, stat_gstrings,
 121                       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
 122}
 123
 124/* Fill in an array of 64-bit statistics from various sources.
 125 * This array will be appended to the end of the ethtool_stats
 126 * structure, and returned to user space
 127 */
 128static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
 129                            u64 *buf)
 130{
 131        int i;
 132        struct gfar_private *priv = netdev_priv(dev);
 133        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 134        atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
 135
 136        for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
 137                buf[i] = atomic64_read(&extra[i]);
 138
 139        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
 140                u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
 141
 142                for (; i < GFAR_STATS_LEN; i++, rmon++)
 143                        buf[i] = (u64) gfar_read(rmon);
 144        }
 145}
 146
 147static int gfar_sset_count(struct net_device *dev, int sset)
 148{
 149        struct gfar_private *priv = netdev_priv(dev);
 150
 151        switch (sset) {
 152        case ETH_SS_STATS:
 153                if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
 154                        return GFAR_STATS_LEN;
 155                else
 156                        return GFAR_EXTRA_STATS_LEN;
 157        default:
 158                return -EOPNOTSUPP;
 159        }
 160}
 161
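    /* Backs "ethtool -i eth0" (illustrative); only the driver name is
     * filled in here.
     */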
 162/* Fills in the drvinfo structure with some basic info */
 163static void gfar_gdrvinfo(struct net_device *dev,
 164                          struct ethtool_drvinfo *drvinfo)
 165{
 166        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
 167}
 168
 169/* Return the length of the register structure */
 170static int gfar_reglen(struct net_device *dev)
 171{
  172        return sizeof(struct gfar);
 173}
 174
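    /* Register dump for "ethtool -d eth0", e.g. (illustrative)
     * "ethtool -d eth0 raw on > gfar-regs.bin" to capture the raw image.
     */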
 175/* Return a dump of the GFAR register space */
 176static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 177                          void *regbuf)
 178{
 179        int i;
 180        struct gfar_private *priv = netdev_priv(dev);
 181        u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
 182        u32 *buf = (u32 *) regbuf;
 183
  184        for (i = 0; i < sizeof(struct gfar) / sizeof(u32); i++)
 185                buf[i] = gfar_read(&theregs[i]);
 186}
 187
 188/* Convert microseconds to ethernet clock ticks, which changes
 189 * depending on what speed the controller is running at */
 190static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
 191                                     unsigned int usecs)
 192{
 193        struct net_device *ndev = priv->ndev;
 194        struct phy_device *phydev = ndev->phydev;
 195        unsigned int count;
 196
 197        /* The timer is different, depending on the interface speed */
 198        switch (phydev->speed) {
 199        case SPEED_1000:
 200                count = GFAR_GBIT_TIME;
 201                break;
 202        case SPEED_100:
 203                count = GFAR_100_TIME;
 204                break;
 205        case SPEED_10:
 206        default:
 207                count = GFAR_10_TIME;
 208                break;
 209        }
 210
 211        /* Make sure we return a number greater than 0
 212         * if usecs > 0 */
 213        return DIV_ROUND_UP(usecs * 1000, count);
 214}
 215
 216/* Convert ethernet clock ticks to microseconds */
 217static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
 218                                     unsigned int ticks)
 219{
 220        struct net_device *ndev = priv->ndev;
 221        struct phy_device *phydev = ndev->phydev;
 222        unsigned int count;
 223
 224        /* The timer is different, depending on the interface speed */
 225        switch (phydev->speed) {
 226        case SPEED_1000:
 227                count = GFAR_GBIT_TIME;
 228                break;
 229        case SPEED_100:
 230                count = GFAR_100_TIME;
 231                break;
 232        case SPEED_10:
 233        default:
 234                count = GFAR_10_TIME;
 235                break;
 236        }
 237
  238        /* Make sure we return a number greater than 0
  239         * if ticks is > 0 */
 240        return (ticks * count) / 1000;
 241}
 242
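    /* The helpers above convert between microseconds and coalescing timer
     * ticks; from the formulas, the per-speed count is effectively the tick
     * length in nanoseconds, so usecs2ticks() rounds up usecs * 1000 / count.
     *
     * Illustrative usage: "ethtool -C eth0 rx-usecs 98 rx-frames 16
     * tx-usecs 98 tx-frames 16"; setting either member of a pair to 0
     * disables coalescing for that direction (see gfar_scoalesce below).
     */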
 243/* Get the coalescing parameters, and put them in the cvals
 244 * structure.  */
 245static int gfar_gcoalesce(struct net_device *dev,
 246                          struct ethtool_coalesce *cvals)
 247{
 248        struct gfar_private *priv = netdev_priv(dev);
 249        struct gfar_priv_rx_q *rx_queue = NULL;
 250        struct gfar_priv_tx_q *tx_queue = NULL;
 251        unsigned long rxtime;
 252        unsigned long rxcount;
 253        unsigned long txtime;
 254        unsigned long txcount;
 255
 256        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 257                return -EOPNOTSUPP;
 258
 259        if (!dev->phydev)
 260                return -ENODEV;
 261
 262        rx_queue = priv->rx_queue[0];
 263        tx_queue = priv->tx_queue[0];
 264
 265        rxtime  = get_ictt_value(rx_queue->rxic);
 266        rxcount = get_icft_value(rx_queue->rxic);
 267        txtime  = get_ictt_value(tx_queue->txic);
 268        txcount = get_icft_value(tx_queue->txic);
 269        cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
 270        cvals->rx_max_coalesced_frames = rxcount;
 271
 272        cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
 273        cvals->tx_max_coalesced_frames = txcount;
 274
 275        return 0;
 276}
 277
 278/* Change the coalescing values.
 279 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 280 * in order for coalescing to be active
 281 */
 282static int gfar_scoalesce(struct net_device *dev,
 283                          struct ethtool_coalesce *cvals)
 284{
 285        struct gfar_private *priv = netdev_priv(dev);
 286        int i, err = 0;
 287
 288        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 289                return -EOPNOTSUPP;
 290
 291        if (!dev->phydev)
 292                return -ENODEV;
 293
 294        /* Check the bounds of the values */
 295        if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
 296                netdev_info(dev, "Coalescing is limited to %d microseconds\n",
 297                            GFAR_MAX_COAL_USECS);
 298                return -EINVAL;
 299        }
 300
 301        if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
 302                netdev_info(dev, "Coalescing is limited to %d frames\n",
 303                            GFAR_MAX_COAL_FRAMES);
 304                return -EINVAL;
 305        }
 306
 307        /* Check the bounds of the values */
 308        if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
 309                netdev_info(dev, "Coalescing is limited to %d microseconds\n",
 310                            GFAR_MAX_COAL_USECS);
 311                return -EINVAL;
 312        }
 313
 314        if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
 315                netdev_info(dev, "Coalescing is limited to %d frames\n",
 316                            GFAR_MAX_COAL_FRAMES);
 317                return -EINVAL;
 318        }
 319
 320        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
 321                cpu_relax();
 322
 323        /* Set up rx coalescing */
 324        if ((cvals->rx_coalesce_usecs == 0) ||
 325            (cvals->rx_max_coalesced_frames == 0)) {
 326                for (i = 0; i < priv->num_rx_queues; i++)
 327                        priv->rx_queue[i]->rxcoalescing = 0;
 328        } else {
 329                for (i = 0; i < priv->num_rx_queues; i++)
 330                        priv->rx_queue[i]->rxcoalescing = 1;
 331        }
 332
 333        for (i = 0; i < priv->num_rx_queues; i++) {
 334                priv->rx_queue[i]->rxic = mk_ic_value(
 335                        cvals->rx_max_coalesced_frames,
 336                        gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
 337        }
 338
 339        /* Set up tx coalescing */
 340        if ((cvals->tx_coalesce_usecs == 0) ||
 341            (cvals->tx_max_coalesced_frames == 0)) {
 342                for (i = 0; i < priv->num_tx_queues; i++)
 343                        priv->tx_queue[i]->txcoalescing = 0;
 344        } else {
 345                for (i = 0; i < priv->num_tx_queues; i++)
 346                        priv->tx_queue[i]->txcoalescing = 1;
 347        }
 348
 349        for (i = 0; i < priv->num_tx_queues; i++) {
 350                priv->tx_queue[i]->txic = mk_ic_value(
 351                        cvals->tx_max_coalesced_frames,
 352                        gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
 353        }
 354
 355        if (dev->flags & IFF_UP) {
 356                stop_gfar(dev);
 357                err = startup_gfar(dev);
 358        } else {
 359                gfar_mac_reset(priv);
 360        }
 361
 362        clear_bit_unlock(GFAR_RESETTING, &priv->state);
 363
 364        return err;
 365}
 366
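    /* Illustrative usage: "ethtool -g eth0" to query the ring sizes and
     * "ethtool -G eth0 rx <N> tx <N>" to resize them; <N> must be a power
     * of 2 and no larger than the advertised maximum.
     */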
 367/* Fills in rvals with the current ring parameters.  Currently,
 368 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 369 * jumbo are ignored by the driver */
 370static void gfar_gringparam(struct net_device *dev,
 371                            struct ethtool_ringparam *rvals)
 372{
 373        struct gfar_private *priv = netdev_priv(dev);
 374        struct gfar_priv_tx_q *tx_queue = NULL;
 375        struct gfar_priv_rx_q *rx_queue = NULL;
 376
 377        tx_queue = priv->tx_queue[0];
 378        rx_queue = priv->rx_queue[0];
 379
 380        rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
 381        rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
 382        rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
 383        rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
 384
 385        /* Values changeable by the user.  The valid values are
 386         * in the range 1 to the "*_max_pending" counterpart above.
 387         */
 388        rvals->rx_pending = rx_queue->rx_ring_size;
 389        rvals->rx_mini_pending = rx_queue->rx_ring_size;
 390        rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
 391        rvals->tx_pending = tx_queue->tx_ring_size;
 392}
 393
 394/* Change the current ring parameters, stopping the controller if
 395 * necessary so that we don't mess things up while we're in motion.
 396 */
 397static int gfar_sringparam(struct net_device *dev,
 398                           struct ethtool_ringparam *rvals)
 399{
 400        struct gfar_private *priv = netdev_priv(dev);
 401        int err = 0, i;
 402
 403        if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
 404                return -EINVAL;
 405
 406        if (!is_power_of_2(rvals->rx_pending)) {
 407                netdev_err(dev, "Ring sizes must be a power of 2\n");
 408                return -EINVAL;
 409        }
 410
 411        if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
 412                return -EINVAL;
 413
 414        if (!is_power_of_2(rvals->tx_pending)) {
 415                netdev_err(dev, "Ring sizes must be a power of 2\n");
 416                return -EINVAL;
 417        }
 418
 419        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
 420                cpu_relax();
 421
 422        if (dev->flags & IFF_UP)
 423                stop_gfar(dev);
 424
 425        /* Change the sizes */
 426        for (i = 0; i < priv->num_rx_queues; i++)
 427                priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
 428
 429        for (i = 0; i < priv->num_tx_queues; i++)
 430                priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
 431
 432        /* Rebuild the rings with the new size */
 433        if (dev->flags & IFF_UP)
 434                err = startup_gfar(dev);
 435
 436        clear_bit_unlock(GFAR_RESETTING, &priv->state);
 437
 438        return err;
 439}
 440
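    /* Pause-frame (flow control) settings, e.g. (illustrative)
     * "ethtool -a eth0" to query and "ethtool -A eth0 autoneg off rx on tx on"
     * to force symmetric pause; with autoneg on, the result also depends on
     * what the link partner advertises.
     */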
 441static void gfar_gpauseparam(struct net_device *dev,
 442                             struct ethtool_pauseparam *epause)
 443{
 444        struct gfar_private *priv = netdev_priv(dev);
 445
 446        epause->autoneg = !!priv->pause_aneg_en;
 447        epause->rx_pause = !!priv->rx_pause_en;
 448        epause->tx_pause = !!priv->tx_pause_en;
 449}
 450
 451static int gfar_spauseparam(struct net_device *dev,
 452                            struct ethtool_pauseparam *epause)
 453{
 454        struct gfar_private *priv = netdev_priv(dev);
 455        struct phy_device *phydev = dev->phydev;
 456        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 457
 458        if (!phydev)
 459                return -ENODEV;
 460
 461        if (!phy_validate_pause(phydev, epause))
 462                return -EINVAL;
 463
 464        priv->rx_pause_en = priv->tx_pause_en = 0;
 465        phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
 466        if (epause->rx_pause) {
 467                priv->rx_pause_en = 1;
 468
 469                if (epause->tx_pause) {
 470                        priv->tx_pause_en = 1;
 471                }
 472        } else if (epause->tx_pause) {
 473                priv->tx_pause_en = 1;
 474        }
 475
 476        if (epause->autoneg)
 477                priv->pause_aneg_en = 1;
 478        else
 479                priv->pause_aneg_en = 0;
 480
 481        if (!epause->autoneg) {
 482                u32 tempval = gfar_read(&regs->maccfg1);
 483
 484                tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
 485
 486                priv->tx_actual_en = 0;
 487                if (priv->tx_pause_en) {
 488                        priv->tx_actual_en = 1;
 489                        tempval |= MACCFG1_TX_FLOW;
 490                }
 491
 492                if (priv->rx_pause_en)
 493                        tempval |= MACCFG1_RX_FLOW;
 494                gfar_write(&regs->maccfg1, tempval);
 495        }
 496
 497        return 0;
 498}
 499
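    /* Feature toggling entry point (wired up as ndo_set_features), hit by
     * e.g. (illustrative) "ethtool -K eth0 rxvlan off" or
     * "ethtool -K eth0 rx off"; VLAN and RX-checksum changes require the
     * rings to be torn down and rebuilt.
     */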
 500int gfar_set_features(struct net_device *dev, netdev_features_t features)
 501{
 502        netdev_features_t changed = dev->features ^ features;
 503        struct gfar_private *priv = netdev_priv(dev);
 504        int err = 0;
 505
 506        if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 507                         NETIF_F_RXCSUM)))
 508                return 0;
 509
 510        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
 511                cpu_relax();
 512
 513        dev->features = features;
 514
 515        if (dev->flags & IFF_UP) {
 516                /* Now we take down the rings to rebuild them */
 517                stop_gfar(dev);
 518                err = startup_gfar(dev);
 519        } else {
 520                gfar_mac_reset(priv);
 521        }
 522
 523        clear_bit_unlock(GFAR_RESETTING, &priv->state);
 524
 525        return err;
 526}
 527
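    /* Message level control, e.g. (illustrative) "ethtool eth0" shows the
     * current level and "ethtool -s eth0 msglvl 0x3f" changes it.
     */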
 528static uint32_t gfar_get_msglevel(struct net_device *dev)
 529{
 530        struct gfar_private *priv = netdev_priv(dev);
 531
 532        return priv->msg_enable;
 533}
 534
 535static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
 536{
 537        struct gfar_private *priv = netdev_priv(dev);
 538
 539        priv->msg_enable = data;
 540}
 541
 542#ifdef CONFIG_PM
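    /* Wake-on-LAN control, e.g. (illustrative): "ethtool eth0" lists the
     * supported wake modes, "ethtool -s eth0 wol g" arms magic-packet wake,
     * "ethtool -s eth0 wol u" arms unicast (filer-based) wake, and
     * "ethtool -s eth0 wol d" disables wake-up.
     */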
 543static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 544{
 545        struct gfar_private *priv = netdev_priv(dev);
 546
 547        wol->supported = 0;
 548        wol->wolopts = 0;
 549
 550        if (priv->wol_supported & GFAR_WOL_MAGIC)
 551                wol->supported |= WAKE_MAGIC;
 552
 553        if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
 554                wol->supported |= WAKE_UCAST;
 555
 556        if (priv->wol_opts & GFAR_WOL_MAGIC)
 557                wol->wolopts |= WAKE_MAGIC;
 558
 559        if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
 560                wol->wolopts |= WAKE_UCAST;
 561}
 562
 563static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 564{
 565        struct gfar_private *priv = netdev_priv(dev);
 566        u16 wol_opts = 0;
 567        int err;
 568
 569        if (!priv->wol_supported && wol->wolopts)
 570                return -EINVAL;
 571
 572        if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
 573                return -EINVAL;
 574
 575        if (wol->wolopts & WAKE_MAGIC) {
 576                wol_opts |= GFAR_WOL_MAGIC;
 577        } else {
 578                if (wol->wolopts & WAKE_UCAST)
 579                        wol_opts |= GFAR_WOL_FILER_UCAST;
 580        }
 581
 582        wol_opts &= priv->wol_supported;
 583        priv->wol_opts = 0;
 584
 585        err = device_set_wakeup_enable(priv->dev, wol_opts);
 586        if (err)
 587                return err;
 588
 589        priv->wol_opts = wol_opts;
 590
 591        return 0;
 592}
 593#endif
 594
  595static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
 596{
 597        u32 fcr = 0x0, fpr = FPR_FILER_MASK;
 598
 599        if (ethflow & RXH_L2DA) {
 600                fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
 601                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
 602                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 603                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 604                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 605                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 606
 607                fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
 608                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
 609                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 610                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 611                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 612                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 613        }
 614
 615        if (ethflow & RXH_VLAN) {
 616                fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 617                      RQFCR_AND | RQFCR_HASHTBL_0;
 618                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 619                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 620                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 621                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 622        }
 623
 624        if (ethflow & RXH_IP_SRC) {
 625                fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 626                      RQFCR_AND | RQFCR_HASHTBL_0;
 627                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 628                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 629                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 630                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 631        }
 632
 633        if (ethflow & (RXH_IP_DST)) {
 634                fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 635                      RQFCR_AND | RQFCR_HASHTBL_0;
 636                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 637                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 638                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 639                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 640        }
 641
 642        if (ethflow & RXH_L3_PROTO) {
 643                fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 644                      RQFCR_AND | RQFCR_HASHTBL_0;
 645                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 646                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 647                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 648                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 649        }
 650
 651        if (ethflow & RXH_L4_B_0_1) {
 652                fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 653                      RQFCR_AND | RQFCR_HASHTBL_0;
 654                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 655                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 656                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 657                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 658        }
 659
 660        if (ethflow & RXH_L4_B_2_3) {
 661                fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 662                      RQFCR_AND | RQFCR_HASHTBL_0;
 663                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 664                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 665                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 666                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 667        }
 668}
 669
 670static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
 671                                       u64 class)
 672{
 673        unsigned int cmp_rqfpr;
 674        unsigned int *local_rqfpr;
 675        unsigned int *local_rqfcr;
 676        int i = 0x0, k = 0x0;
 677        int j = MAX_FILER_IDX, l = 0x0;
 678        int ret = 1;
 679
 680        local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
 681                                    GFP_KERNEL);
 682        local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
 683                                    GFP_KERNEL);
 684        if (!local_rqfpr || !local_rqfcr) {
 685                ret = 0;
 686                goto err;
 687        }
 688
 689        switch (class) {
  690        case TCP_V4_FLOW:
  691                cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
  692                break;
  693        case UDP_V4_FLOW:
  694                cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
  695                break;
  696        case TCP_V6_FLOW:
  697                cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
  698                break;
  699        case UDP_V6_FLOW:
  700                cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
  701                break;
 702        default:
 703                netdev_err(priv->ndev,
 704                           "Right now this class is not supported\n");
 705                ret = 0;
 706                goto err;
 707        }
 708
 709        for (i = 0; i < MAX_FILER_IDX + 1; i++) {
 710                local_rqfpr[j] = priv->ftp_rqfpr[i];
 711                local_rqfcr[j] = priv->ftp_rqfcr[i];
 712                j--;
 713                if ((priv->ftp_rqfcr[i] ==
 714                     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
 715                    (priv->ftp_rqfpr[i] == cmp_rqfpr))
 716                        break;
 717        }
 718
 719        if (i == MAX_FILER_IDX + 1) {
 720                netdev_err(priv->ndev,
 721                           "No parse rule found, can't create hash rules\n");
 722                ret = 0;
 723                goto err;
 724        }
 725
  726        /* A match marks the start of a cluster rule. If such a cluster
  727         * was already programmed, we need to overwrite those rules.
  728         */
 729        for (l = i+1; l < MAX_FILER_IDX; l++) {
 730                if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
 731                    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
 732                        priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
 733                                             RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
 734                        priv->ftp_rqfpr[l] = FPR_FILER_MASK;
 735                        gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
 736                                         priv->ftp_rqfpr[l]);
 737                        break;
 738                }
 739
 740                if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
 741                        (priv->ftp_rqfcr[l] & RQFCR_AND))
 742                        continue;
 743                else {
 744                        local_rqfpr[j] = priv->ftp_rqfpr[l];
 745                        local_rqfcr[j] = priv->ftp_rqfcr[l];
 746                        j--;
 747                }
 748        }
 749
 750        priv->cur_filer_idx = l - 1;
 751
 752        /* hash rules */
 753        ethflow_to_filer_rules(priv, ethflow);
 754
 755        /* Write back the popped out rules again */
 756        for (k = j+1; k < MAX_FILER_IDX; k++) {
 757                priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
 758                priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
 759                gfar_write_filer(priv, priv->cur_filer_idx,
 760                                 local_rqfcr[k], local_rqfpr[k]);
 761                if (!priv->cur_filer_idx)
 762                        break;
 763                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 764        }
 765
 766err:
 767        kfree(local_rqfcr);
 768        kfree(local_rqfpr);
 769        return ret;
 770}
 771
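    /* ETHTOOL_SRXFH handler: programs hash-based flow steering rules for a
     * flow class, e.g. (illustrative) "ethtool -N eth0 rx-flow-hash tcp4 sdfn"
     * to hash TCP/IPv4 flows on source/destination IP and ports.
     */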
 772static int gfar_set_hash_opts(struct gfar_private *priv,
 773                              struct ethtool_rxnfc *cmd)
 774{
 775        /* write the filer rules here */
 776        if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
 777                return -EINVAL;
 778
 779        return 0;
 780}
 781
 782static int gfar_check_filer_hardware(struct gfar_private *priv)
 783{
 784        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 785        u32 i;
 786
 787        /* Check if we are in FIFO mode */
 788        i = gfar_read(&regs->ecntrl);
 789        i &= ECNTRL_FIFM;
 790        if (i == ECNTRL_FIFM) {
 791                netdev_notice(priv->ndev, "Interface in FIFO mode\n");
 792                i = gfar_read(&regs->rctrl);
 793                i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
 794                if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
 795                        netdev_info(priv->ndev,
 796                                    "Receive Queue Filtering enabled\n");
 797                } else {
 798                        netdev_warn(priv->ndev,
 799                                    "Receive Queue Filtering disabled\n");
 800                        return -EOPNOTSUPP;
 801                }
 802        }
 803        /* Or in standard mode */
 804        else {
 805                i = gfar_read(&regs->rctrl);
 806                i &= RCTRL_PRSDEP_MASK;
 807                if (i == RCTRL_PRSDEP_MASK) {
 808                        netdev_info(priv->ndev,
 809                                    "Receive Queue Filtering enabled\n");
 810                } else {
 811                        netdev_warn(priv->ndev,
 812                                    "Receive Queue Filtering disabled\n");
 813                        return -EOPNOTSUPP;
 814                }
 815        }
 816
 817        /* Sets the properties for arbitrary filer rule
 818         * to the first 4 Layer 4 Bytes
 819         */
 820        gfar_write(&regs->rbifx, 0xC0C1C2C3);
 821        return 0;
 822}
 823
 824/* Write a mask to filer cache */
 825static void gfar_set_mask(u32 mask, struct filer_table *tab)
 826{
 827        tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
 828        tab->fe[tab->index].prop = mask;
 829        tab->index++;
 830}
 831
 832/* Sets parse bits (e.g. IP or TCP) */
 833static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
 834{
 835        gfar_set_mask(mask, tab);
 836        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
 837                                   RQFCR_AND;
 838        tab->fe[tab->index].prop = value;
 839        tab->index++;
 840}
 841
 842static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
 843                                       struct filer_table *tab)
 844{
 845        gfar_set_mask(mask, tab);
 846        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
 847        tab->fe[tab->index].prop = value;
 848        tab->index++;
 849}
 850
 851/* For setting a tuple of value and mask of type flag
 852 * Example:
 853 * IP-Src = 10.0.0.0/255.0.0.0
  854 * value: 0x0A000000 mask: 0xFF000000 flag: RQFPR_IPV4
  855 *
  856 * Ethtool gives us value=0 and mask=~0 for a don't-care tuple;
  857 * for a don't-care mask it gives us 0.
  858 *
  859 * The don't-care check and the mask adjustment for mask=0 are done for
  860 * the VLAN and MAC fields at an upper level (the information needed for
  861 * that is missing at this level); those can be dropped if value=0 and mask=0.
  862 *
  863 * Furthermore, all masks are one-padded for better hardware efficiency.
 864 */
 865static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
 866                               struct filer_table *tab)
 867{
 868        switch (flag) {
 869                /* 3bit */
 870        case RQFCR_PID_PRI:
 871                if (!(value | mask))
 872                        return;
 873                mask |= RQFCR_PID_PRI_MASK;
 874                break;
 875                /* 8bit */
 876        case RQFCR_PID_L4P:
 877        case RQFCR_PID_TOS:
 878                if (!~(mask | RQFCR_PID_L4P_MASK))
 879                        return;
 880                if (!mask)
 881                        mask = ~0;
 882                else
 883                        mask |= RQFCR_PID_L4P_MASK;
 884                break;
 885                /* 12bit */
 886        case RQFCR_PID_VID:
 887                if (!(value | mask))
 888                        return;
 889                mask |= RQFCR_PID_VID_MASK;
 890                break;
 891                /* 16bit */
 892        case RQFCR_PID_DPT:
 893        case RQFCR_PID_SPT:
 894        case RQFCR_PID_ETY:
 895                if (!~(mask | RQFCR_PID_PORT_MASK))
 896                        return;
 897                if (!mask)
 898                        mask = ~0;
 899                else
 900                        mask |= RQFCR_PID_PORT_MASK;
 901                break;
 902                /* 24bit */
 903        case RQFCR_PID_DAH:
 904        case RQFCR_PID_DAL:
 905        case RQFCR_PID_SAH:
 906        case RQFCR_PID_SAL:
 907                if (!(value | mask))
 908                        return;
 909                mask |= RQFCR_PID_MAC_MASK;
 910                break;
 911                /* for all real 32bit masks */
 912        default:
 913                if (!~mask)
 914                        return;
 915                if (!mask)
 916                        mask = ~0;
 917                break;
 918        }
 919        gfar_set_general_attribute(value, mask, flag, tab);
 920}
 921
 922/* Translates value and mask for UDP, TCP or SCTP */
 923static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
 924                              struct ethtool_tcpip4_spec *mask,
 925                              struct filer_table *tab)
 926{
 927        gfar_set_attribute(be32_to_cpu(value->ip4src),
 928                           be32_to_cpu(mask->ip4src),
 929                           RQFCR_PID_SIA, tab);
 930        gfar_set_attribute(be32_to_cpu(value->ip4dst),
 931                           be32_to_cpu(mask->ip4dst),
 932                           RQFCR_PID_DIA, tab);
 933        gfar_set_attribute(be16_to_cpu(value->pdst),
 934                           be16_to_cpu(mask->pdst),
 935                           RQFCR_PID_DPT, tab);
 936        gfar_set_attribute(be16_to_cpu(value->psrc),
 937                           be16_to_cpu(mask->psrc),
 938                           RQFCR_PID_SPT, tab);
 939        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
 940}
 941
 942/* Translates value and mask for RAW-IP4 */
 943static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
 944                             struct ethtool_usrip4_spec *mask,
 945                             struct filer_table *tab)
 946{
 947        gfar_set_attribute(be32_to_cpu(value->ip4src),
 948                           be32_to_cpu(mask->ip4src),
 949                           RQFCR_PID_SIA, tab);
 950        gfar_set_attribute(be32_to_cpu(value->ip4dst),
 951                           be32_to_cpu(mask->ip4dst),
 952                           RQFCR_PID_DIA, tab);
 953        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
 954        gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
 955        gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
 956                           be32_to_cpu(mask->l4_4_bytes),
 957                           RQFCR_PID_ARB, tab);
 958
 959}
 960
 961/* Translates value and mask for ETHER spec */
 962static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
 963                           struct filer_table *tab)
 964{
 965        u32 upper_temp_mask = 0;
 966        u32 lower_temp_mask = 0;
 967
 968        /* Source address */
 969        if (!is_broadcast_ether_addr(mask->h_source)) {
 970                if (is_zero_ether_addr(mask->h_source)) {
 971                        upper_temp_mask = 0xFFFFFFFF;
 972                        lower_temp_mask = 0xFFFFFFFF;
 973                } else {
 974                        upper_temp_mask = mask->h_source[0] << 16 |
 975                                          mask->h_source[1] << 8  |
 976                                          mask->h_source[2];
 977                        lower_temp_mask = mask->h_source[3] << 16 |
 978                                          mask->h_source[4] << 8  |
 979                                          mask->h_source[5];
 980                }
 981                /* Upper 24bit */
 982                gfar_set_attribute(value->h_source[0] << 16 |
 983                                   value->h_source[1] << 8  |
 984                                   value->h_source[2],
 985                                   upper_temp_mask, RQFCR_PID_SAH, tab);
 986                /* And the same for the lower part */
 987                gfar_set_attribute(value->h_source[3] << 16 |
 988                                   value->h_source[4] << 8  |
 989                                   value->h_source[5],
 990                                   lower_temp_mask, RQFCR_PID_SAL, tab);
 991        }
 992        /* Destination address */
 993        if (!is_broadcast_ether_addr(mask->h_dest)) {
 994                /* Special for destination is limited broadcast */
 995                if ((is_broadcast_ether_addr(value->h_dest) &&
 996                    is_zero_ether_addr(mask->h_dest))) {
 997                        gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
 998                } else {
 999                        if (is_zero_ether_addr(mask->h_dest)) {
1000                                upper_temp_mask = 0xFFFFFFFF;
1001                                lower_temp_mask = 0xFFFFFFFF;
1002                        } else {
1003                                upper_temp_mask = mask->h_dest[0] << 16 |
1004                                                  mask->h_dest[1] << 8  |
1005                                                  mask->h_dest[2];
1006                                lower_temp_mask = mask->h_dest[3] << 16 |
1007                                                  mask->h_dest[4] << 8  |
1008                                                  mask->h_dest[5];
1009                        }
1010
1011                        /* Upper 24bit */
1012                        gfar_set_attribute(value->h_dest[0] << 16 |
1013                                           value->h_dest[1] << 8  |
1014                                           value->h_dest[2],
1015                                           upper_temp_mask, RQFCR_PID_DAH, tab);
1016                        /* And the same for the lower part */
1017                        gfar_set_attribute(value->h_dest[3] << 16 |
1018                                           value->h_dest[4] << 8  |
1019                                           value->h_dest[5],
1020                                           lower_temp_mask, RQFCR_PID_DAL, tab);
1021                }
1022        }
1023
1024        gfar_set_attribute(be16_to_cpu(value->h_proto),
1025                           be16_to_cpu(mask->h_proto),
1026                           RQFCR_PID_ETY, tab);
1027}
1028
1029static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
1030{
1031        return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
1032}
1033
1034static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
1035{
1036        return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
1037}
1038
1039static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
1040{
1041        return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
1042}
1043
1044static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
1045{
1046        return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
1047}
1048
1049static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
1050{
1051        return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
1052                VLAN_PRIO_SHIFT;
1053}
1054
1055static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
1056{
1057        return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
1058                VLAN_PRIO_SHIFT;
1059}
1060
1061/* Convert a rule to binary filter format of gianfar */
1062static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1063                                 struct filer_table *tab)
1064{
1065        u32 vlan = 0, vlan_mask = 0;
1066        u32 id = 0, id_mask = 0;
1067        u32 cfi = 0, cfi_mask = 0;
1068        u32 prio = 0, prio_mask = 0;
1069        u32 old_index = tab->index;
1070
1071        /* Check if vlan is wanted */
1072        if ((rule->flow_type & FLOW_EXT) &&
1073            (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
1074                if (!rule->m_ext.vlan_tci)
1075                        rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);
1076
1077                vlan = RQFPR_VLN;
1078                vlan_mask = RQFPR_VLN;
1079
1080                /* Separate the fields */
1081                id = vlan_tci_vid(rule);
1082                id_mask = vlan_tci_vidm(rule);
1083                cfi = vlan_tci_cfi(rule);
1084                cfi_mask = vlan_tci_cfim(rule);
1085                prio = vlan_tci_prio(rule);
1086                prio_mask = vlan_tci_priom(rule);
1087
1088                if (cfi_mask) {
1089                        if (cfi)
1090                                vlan |= RQFPR_CFI;
1091                        vlan_mask |= RQFPR_CFI;
1092                }
1093        }
1094
1095        switch (rule->flow_type & ~FLOW_EXT) {
1096        case TCP_V4_FLOW:
1097                gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
1098                                    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
1099                gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
1100                                  &rule->m_u.tcp_ip4_spec, tab);
1101                break;
1102        case UDP_V4_FLOW:
1103                gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
1104                                    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
1105                gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
1106                                  &rule->m_u.udp_ip4_spec, tab);
1107                break;
1108        case SCTP_V4_FLOW:
1109                gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1110                                    tab);
1111                gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
1112                gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
1113                                  (struct ethtool_tcpip4_spec *)&rule->m_u,
1114                                  tab);
1115                break;
1116        case IP_USER_FLOW:
1117                gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1118                                    tab);
1119                gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
1120                                 (struct ethtool_usrip4_spec *) &rule->m_u,
1121                                 tab);
1122                break;
1123        case ETHER_FLOW:
1124                if (vlan)
1125                        gfar_set_parse_bits(vlan, vlan_mask, tab);
1126                gfar_set_ether((struct ethhdr *) &rule->h_u,
1127                               (struct ethhdr *) &rule->m_u, tab);
1128                break;
1129        default:
1130                return -1;
1131        }
1132
1133        /* Set the vlan attributes in the end */
1134        if (vlan) {
1135                gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
1136                gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
1137        }
1138
1139        /* If there has been nothing written till now, it must be a default */
1140        if (tab->index == old_index) {
1141                gfar_set_mask(0xFFFFFFFF, tab);
1142                tab->fe[tab->index].ctrl = 0x20;
1143                tab->fe[tab->index].prop = 0x0;
1144                tab->index++;
1145        }
1146
1147        /* Remove last AND */
1148        tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
1149
1150        /* Specify which queue to use or to drop */
1151        if (rule->ring_cookie == RX_CLS_FLOW_DISC)
1152                tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
1153        else
1154                tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
1155
1156        /* Only big enough entries can be clustered */
1157        if (tab->index > (old_index + 2)) {
1158                tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
1159                tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
1160        }
1161
1162        /* In rare cases the cache can be full while there is
1163         * free space in hw
1164         */
1165        if (tab->index > MAX_FILER_CACHE_IDX - 1)
1166                return -EBUSY;
1167
1168        return 0;
1169}
1170
1171/* Write the bit-pattern from software's buffer to hardware registers */
1172static int gfar_write_filer_table(struct gfar_private *priv,
1173                                  struct filer_table *tab)
1174{
1175        u32 i = 0;
1176        if (tab->index > MAX_FILER_IDX - 1)
1177                return -EBUSY;
1178
1179        /* Fill regular entries */
1180        for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
1181                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
 1182        /* Fill the rest with fall-throughs */
1183        for (; i < MAX_FILER_IDX; i++)
1184                gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1185        /* Last entry must be default accept
1186         * because that's what people expect
1187         */
1188        gfar_write_filer(priv, i, 0x20, 0x0);
1189
1190        return 0;
1191}
1192
1193static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
1194                                 struct gfar_private *priv)
1195{
1196
1197        if (flow->flow_type & FLOW_EXT) {
1198                if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
1199                        netdev_warn(priv->ndev,
1200                                    "User-specific data not supported!\n");
1201                if (~flow->m_ext.vlan_etype)
1202                        netdev_warn(priv->ndev,
1203                                    "VLAN-etype not supported!\n");
1204        }
1205        if (flow->flow_type == IP_USER_FLOW)
1206                if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
1207                        netdev_warn(priv->ndev,
1208                                    "IP-Version differing from IPv4 not supported!\n");
1209
1210        return 0;
1211}
1212
1213static int gfar_process_filer_changes(struct gfar_private *priv)
1214{
1215        struct ethtool_flow_spec_container *j;
1216        struct filer_table *tab;
1217        s32 ret = 0;
1218
1219        /* So index is set to zero, too! */
1220        tab = kzalloc(sizeof(*tab), GFP_KERNEL);
1221        if (tab == NULL)
1222                return -ENOMEM;
1223
1224        /* Now convert the existing filer data from flow_spec into
1225         * filer tables binary format
1226         */
1227        list_for_each_entry(j, &priv->rx_list.list, list) {
1228                ret = gfar_convert_to_filer(&j->fs, tab);
1229                if (ret == -EBUSY) {
1230                        netdev_err(priv->ndev,
1231                                   "Rule not added: No free space!\n");
1232                        goto end;
1233                }
1234                if (ret == -1) {
1235                        netdev_err(priv->ndev,
1236                                   "Rule not added: Unsupported Flow-type!\n");
1237                        goto end;
1238                }
1239        }
1240
1241        /* Write everything to hardware */
1242        ret = gfar_write_filer_table(priv, tab);
1243        if (ret == -EBUSY) {
1244                netdev_err(priv->ndev, "Rule not added: No free space!\n");
1245                goto end;
1246        }
1247
1248end:
1249        kfree(tab);
1250        return ret;
1251}
1252
1253static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
1254{
1255        u32 i = 0;
1256
1257        for (i = 0; i < sizeof(flow->m_u); i++)
1258                flow->m_u.hdata[i] ^= 0xFF;
1259
1260        flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
1261        flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
1262        flow->m_ext.data[0] ^= cpu_to_be32(~0);
1263        flow->m_ext.data[1] ^= cpu_to_be32(~0);
1264}
1265
1266static int gfar_add_cls(struct gfar_private *priv,
1267                        struct ethtool_rx_flow_spec *flow)
1268{
1269        struct ethtool_flow_spec_container *temp, *comp;
1270        int ret = 0;
1271
1272        temp = kmalloc(sizeof(*temp), GFP_KERNEL);
1273        if (temp == NULL)
1274                return -ENOMEM;
1275        memcpy(&temp->fs, flow, sizeof(temp->fs));
1276
1277        gfar_invert_masks(&temp->fs);
1278        ret = gfar_check_capability(&temp->fs, priv);
1279        if (ret)
1280                goto clean_mem;
1281        /* Link in the new element at the right @location */
1282        if (list_empty(&priv->rx_list.list)) {
1283                ret = gfar_check_filer_hardware(priv);
1284                if (ret != 0)
1285                        goto clean_mem;
1286                list_add(&temp->list, &priv->rx_list.list);
1287                goto process;
1288        } else {
1289                list_for_each_entry(comp, &priv->rx_list.list, list) {
1290                        if (comp->fs.location > flow->location) {
1291                                list_add_tail(&temp->list, &comp->list);
1292                                goto process;
1293                        }
1294                        if (comp->fs.location == flow->location) {
1295                                netdev_err(priv->ndev,
1296                                           "Rule not added: ID %d not free!\n",
1297                                           flow->location);
1298                                ret = -EBUSY;
1299                                goto clean_mem;
1300                        }
1301                }
1302                list_add_tail(&temp->list, &priv->rx_list.list);
1303        }
1304
1305process:
1306        priv->rx_list.count++;
1307        ret = gfar_process_filer_changes(priv);
1308        if (ret)
1309                goto clean_list;
1310        return ret;
1311
1312clean_list:
1313        priv->rx_list.count--;
1314        list_del(&temp->list);
1315clean_mem:
1316        kfree(temp);
1317        return ret;
1318}
1319
1320static int gfar_del_cls(struct gfar_private *priv, u32 loc)
1321{
1322        struct ethtool_flow_spec_container *comp;
1323        u32 ret = -EINVAL;
1324
1325        if (list_empty(&priv->rx_list.list))
1326                return ret;
1327
1328        list_for_each_entry(comp, &priv->rx_list.list, list) {
1329                if (comp->fs.location == loc) {
1330                        list_del(&comp->list);
1331                        kfree(comp);
1332                        priv->rx_list.count--;
1333                        gfar_process_filer_changes(priv);
1334                        ret = 0;
1335                        break;
1336                }
1337        }
1338
1339        return ret;
1340}
1341
1342static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
1343{
1344        struct ethtool_flow_spec_container *comp;
1345        u32 ret = -EINVAL;
1346
1347        list_for_each_entry(comp, &priv->rx_list.list, list) {
1348                if (comp->fs.location == cmd->fs.location) {
1349                        memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
1350                        gfar_invert_masks(&cmd->fs);
1351                        ret = 0;
1352                        break;
1353                }
1354        }
1355
1356        return ret;
1357}
1358
1359static int gfar_get_cls_all(struct gfar_private *priv,
1360                            struct ethtool_rxnfc *cmd, u32 *rule_locs)
1361{
1362        struct ethtool_flow_spec_container *comp;
1363        u32 i = 0;
1364
1365        list_for_each_entry(comp, &priv->rx_list.list, list) {
1366                if (i == cmd->rule_cnt)
1367                        return -EMSGSIZE;
1368                rule_locs[i] = comp->fs.location;
1369                i++;
1370        }
1371
1372        cmd->data = MAX_FILER_IDX;
1373        cmd->rule_cnt = i;
1374
1375        return 0;
1376}
1377
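    /* ethtool_rxnfc set/get entry points. Illustrative usage:
     *   ethtool -N eth0 flow-type udp4 dst-port 319 action 1 loc 5   # insert
     *   ethtool -N eth0 delete 5                                     # remove
     *   ethtool -n eth0                                              # list
     * An action of -1 drops matching traffic (RX_CLS_FLOW_DISC).
     */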
1378static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1379{
1380        struct gfar_private *priv = netdev_priv(dev);
1381        int ret = 0;
1382
1383        if (test_bit(GFAR_RESETTING, &priv->state))
1384                return -EBUSY;
1385
1386        mutex_lock(&priv->rx_queue_access);
1387
1388        switch (cmd->cmd) {
1389        case ETHTOOL_SRXFH:
1390                ret = gfar_set_hash_opts(priv, cmd);
1391                break;
1392        case ETHTOOL_SRXCLSRLINS:
1393                if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
1394                     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
1395                    cmd->fs.location >= MAX_FILER_IDX) {
1396                        ret = -EINVAL;
1397                        break;
1398                }
1399                ret = gfar_add_cls(priv, &cmd->fs);
1400                break;
1401        case ETHTOOL_SRXCLSRLDEL:
1402                ret = gfar_del_cls(priv, cmd->fs.location);
1403                break;
1404        default:
1405                ret = -EINVAL;
1406        }
1407
1408        mutex_unlock(&priv->rx_queue_access);
1409
1410        return ret;
1411}
1412
1413static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1414                        u32 *rule_locs)
1415{
1416        struct gfar_private *priv = netdev_priv(dev);
1417        int ret = 0;
1418
1419        switch (cmd->cmd) {
1420        case ETHTOOL_GRXRINGS:
1421                cmd->data = priv->num_rx_queues;
1422                break;
1423        case ETHTOOL_GRXCLSRLCNT:
1424                cmd->rule_cnt = priv->rx_list.count;
1425                break;
1426        case ETHTOOL_GRXCLSRULE:
1427                ret = gfar_get_cls(priv, cmd);
1428                break;
1429        case ETHTOOL_GRXCLSRLALL:
1430                ret = gfar_get_cls_all(priv, cmd, rule_locs);
1431                break;
1432        default:
1433                ret = -EINVAL;
1434                break;
1435        }
1436
1437        return ret;
1438}
1439
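    /* Timestamping capabilities for "ethtool -T eth0" (illustrative). When
     * the eTSEC timer block is present, the PHC index of the associated
     * ptp_qoriq clock is reported so tools such as ptp4l can locate it.
     */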
1440static int gfar_get_ts_info(struct net_device *dev,
1441                            struct ethtool_ts_info *info)
1442{
1443        struct gfar_private *priv = netdev_priv(dev);
1444        struct platform_device *ptp_dev;
1445        struct device_node *ptp_node;
1446        struct ptp_qoriq *ptp = NULL;
1447
1448        info->phc_index = -1;
1449
1450        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
1451                info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
1452                                        SOF_TIMESTAMPING_SOFTWARE;
1453                return 0;
1454        }
1455
1456        ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
1457        if (ptp_node) {
1458                ptp_dev = of_find_device_by_node(ptp_node);
1459                if (ptp_dev)
1460                        ptp = platform_get_drvdata(ptp_dev);
1461        }
1462
1463        if (ptp)
1464                info->phc_index = ptp->phc_index;
1465
1466        info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1467                                SOF_TIMESTAMPING_RX_HARDWARE |
1468                                SOF_TIMESTAMPING_RAW_HARDWARE;
1469        info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1470                         (1 << HWTSTAMP_TX_ON);
1471        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1472                           (1 << HWTSTAMP_FILTER_ALL);
1473        return 0;
1474}
1475
1476const struct ethtool_ops gfar_ethtool_ops = {
1477        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1478                                     ETHTOOL_COALESCE_MAX_FRAMES,
1479        .get_drvinfo = gfar_gdrvinfo,
1480        .get_regs_len = gfar_reglen,
1481        .get_regs = gfar_get_regs,
1482        .get_link = ethtool_op_get_link,
1483        .get_coalesce = gfar_gcoalesce,
1484        .set_coalesce = gfar_scoalesce,
1485        .get_ringparam = gfar_gringparam,
1486        .set_ringparam = gfar_sringparam,
1487        .get_pauseparam = gfar_gpauseparam,
1488        .set_pauseparam = gfar_spauseparam,
1489        .get_strings = gfar_gstrings,
1490        .get_sset_count = gfar_sset_count,
1491        .get_ethtool_stats = gfar_fill_stats,
1492        .get_msglevel = gfar_get_msglevel,
1493        .set_msglevel = gfar_set_msglevel,
1494#ifdef CONFIG_PM
1495        .get_wol = gfar_get_wol,
1496        .set_wol = gfar_set_wol,
1497#endif
1498        .set_rxnfc = gfar_set_nfc,
1499        .get_rxnfc = gfar_get_nfc,
1500        .get_ts_info = gfar_get_ts_info,
1501        .get_link_ksettings = phy_ethtool_get_link_ksettings,
1502        .set_link_ksettings = phy_ethtool_set_link_ksettings,
1503};
1504