linux/drivers/net/ethernet/freescale/gianfar_ethtool.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  drivers/net/ethernet/freescale/gianfar_ethtool.c
   4 *
   5 *  Gianfar Ethernet Driver
   6 *  Ethtool support for Gianfar Enet
   7 *  Based on e1000 ethtool support
   8 *
   9 *  Author: Andy Fleming
  10 *  Maintainer: Kumar Gala
  11 *  Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
  12 *
  13 *  Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
  14 */
  15
  16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  17
  18#include <linux/kernel.h>
  19#include <linux/string.h>
  20#include <linux/errno.h>
  21#include <linux/interrupt.h>
  22#include <linux/delay.h>
  23#include <linux/netdevice.h>
  24#include <linux/etherdevice.h>
  25#include <linux/net_tstamp.h>
  26#include <linux/skbuff.h>
  27#include <linux/spinlock.h>
  28#include <linux/mm.h>
  29
  30#include <asm/io.h>
  31#include <asm/irq.h>
  32#include <linux/uaccess.h>
  33#include <linux/module.h>
  34#include <linux/crc32.h>
  35#include <asm/types.h>
  36#include <linux/ethtool.h>
  37#include <linux/mii.h>
  38#include <linux/phy.h>
  39#include <linux/sort.h>
  40#include <linux/if_vlan.h>
  41#include <linux/of_platform.h>
  42#include <linux/fsl/ptp_qoriq.h>
  43
  44#include "gianfar.h"
  45
  46#define GFAR_MAX_COAL_USECS 0xffff
  47#define GFAR_MAX_COAL_FRAMES 0xff
  48
  49static const char stat_gstrings[][ETH_GSTRING_LEN] = {
  50        /* extra stats */
  51        "rx-allocation-errors",
  52        "rx-large-frame-errors",
  53        "rx-short-frame-errors",
  54        "rx-non-octet-errors",
  55        "rx-crc-errors",
  56        "rx-overrun-errors",
  57        "rx-busy-errors",
  58        "rx-babbling-errors",
  59        "rx-truncated-frames",
  60        "ethernet-bus-error",
  61        "tx-babbling-errors",
  62        "tx-underrun-errors",
  63        "tx-timeout-errors",
  64        /* rmon stats */
  65        "tx-rx-64-frames",
  66        "tx-rx-65-127-frames",
  67        "tx-rx-128-255-frames",
  68        "tx-rx-256-511-frames",
  69        "tx-rx-512-1023-frames",
  70        "tx-rx-1024-1518-frames",
  71        "tx-rx-1519-1522-good-vlan",
  72        "rx-bytes",
  73        "rx-packets",
  74        "rx-fcs-errors",
  75        "receive-multicast-packet",
  76        "receive-broadcast-packet",
  77        "rx-control-frame-packets",
  78        "rx-pause-frame-packets",
  79        "rx-unknown-op-code",
  80        "rx-alignment-error",
  81        "rx-frame-length-error",
  82        "rx-code-error",
  83        "rx-carrier-sense-error",
  84        "rx-undersize-packets",
  85        "rx-oversize-packets",
  86        "rx-fragmented-frames",
  87        "rx-jabber-frames",
  88        "rx-dropped-frames",
  89        "tx-byte-counter",
  90        "tx-packets",
  91        "tx-multicast-packets",
  92        "tx-broadcast-packets",
  93        "tx-pause-control-frames",
  94        "tx-deferral-packets",
  95        "tx-excessive-deferral-packets",
  96        "tx-single-collision-packets",
  97        "tx-multiple-collision-packets",
  98        "tx-late-collision-packets",
  99        "tx-excessive-collision-packets",
 100        "tx-total-collision",
 101        "reserved",
 102        "tx-dropped-frames",
 103        "tx-jabber-frames",
 104        "tx-fcs-errors",
 105        "tx-control-frames",
 106        "tx-oversize-frames",
 107        "tx-undersize-frames",
 108        "tx-fragmented-frames",
 109};
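    /* These strings are reported, in this order, by "ethtool -S": the
     * software "extra" counters first, then the RMON MIB counters when the
     * MAC has an RMON block.  For illustration only (the interface name is
     * a placeholder):
     *
     *   # ethtool -S eth0
     *   NIC statistics:
     *        rx-allocation-errors: 0
     *        rx-large-frame-errors: 0
     *        ...
     */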
 110
 111/* Fill in a buffer with the strings which correspond to the
 112 * stats */
 113static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
 114{
 115        struct gfar_private *priv = netdev_priv(dev);
 116
 117        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
 118                memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
 119        else
 120                memcpy(buf, stat_gstrings,
 121                       GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
 122}
 123
 124/* Fill in an array of 64-bit statistics from various sources.
 125 * This array will be appended to the end of the ethtool_stats
 126 * structure, and returned to user space
 127 */
 128static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
 129                            u64 *buf)
 130{
 131        int i;
 132        struct gfar_private *priv = netdev_priv(dev);
 133        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 134        atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
 135
 136        for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
 137                buf[i] = atomic64_read(&extra[i]);
 138
 139        if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
 140                u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
 141
 142                for (; i < GFAR_STATS_LEN; i++, rmon++)
 143                        buf[i] = (u64) gfar_read(rmon);
 144        }
 145}
 146
 147static int gfar_sset_count(struct net_device *dev, int sset)
 148{
 149        struct gfar_private *priv = netdev_priv(dev);
 150
 151        switch (sset) {
 152        case ETH_SS_STATS:
 153                if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
 154                        return GFAR_STATS_LEN;
 155                else
 156                        return GFAR_EXTRA_STATS_LEN;
 157        default:
 158                return -EOPNOTSUPP;
 159        }
 160}
 161
 162/* Fills in the drvinfo structure with some basic info */
 163static void gfar_gdrvinfo(struct net_device *dev,
 164                          struct ethtool_drvinfo *drvinfo)
 165{
 166        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
 167}
 168
 169/* Return the length of the register structure */
 170static int gfar_reglen(struct net_device *dev)
 171{
 172        return sizeof (struct gfar);
 173}
 174
 175/* Return a dump of the GFAR register space */
 176static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 177                          void *regbuf)
 178{
 179        int i;
 180        struct gfar_private *priv = netdev_priv(dev);
 181        u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
 182        u32 *buf = (u32 *) regbuf;
 183
 184        for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
 185                buf[i] = gfar_read(&theregs[i]);
 186}
 187
 188/* Convert microseconds to ethernet clock ticks, which changes
 189 * depending on what speed the controller is running at */
 190static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
 191                                     unsigned int usecs)
 192{
 193        struct net_device *ndev = priv->ndev;
 194        struct phy_device *phydev = ndev->phydev;
 195        unsigned int count;
 196
 197        /* The timer is different, depending on the interface speed */
 198        switch (phydev->speed) {
 199        case SPEED_1000:
 200                count = GFAR_GBIT_TIME;
 201                break;
 202        case SPEED_100:
 203                count = GFAR_100_TIME;
 204                break;
 205        case SPEED_10:
 206        default:
 207                count = GFAR_10_TIME;
 208                break;
 209        }
 210
 211        /* Make sure we return a number greater than 0
 212         * if usecs > 0 */
 213        return DIV_ROUND_UP(usecs * 1000, count);
 214}
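    /* Worked example, assuming the usual eTSEC tick lengths from gianfar.h
     * (e.g. GFAR_GBIT_TIME = 512 ns per tick at gigabit speed): 30 us maps
     * to DIV_ROUND_UP(30 * 1000, 512) = 59 ticks, and gfar_ticks2usecs()
     * below maps 59 ticks back to (59 * 512) / 1000 = 30 us.
     */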
 215
 216/* Convert ethernet clock ticks to microseconds */
 217static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
 218                                     unsigned int ticks)
 219{
 220        struct net_device *ndev = priv->ndev;
 221        struct phy_device *phydev = ndev->phydev;
 222        unsigned int count;
 223
 224        /* The timer is different, depending on the interface speed */
 225        switch (phydev->speed) {
 226        case SPEED_1000:
 227                count = GFAR_GBIT_TIME;
 228                break;
 229        case SPEED_100:
 230                count = GFAR_100_TIME;
 231                break;
 232        case SPEED_10:
 233        default:
 234                count = GFAR_10_TIME;
 235                break;
 236        }
 237
 238        /* Make sure we return a number greater than 0
 239         * if ticks > 0 */
 240        return (ticks * count) / 1000;
 241}
 242
 243/* Get the coalescing parameters, and put them in the cvals
 244 * structure.  */
 245static int gfar_gcoalesce(struct net_device *dev,
 246                          struct ethtool_coalesce *cvals,
 247                          struct kernel_ethtool_coalesce *kernel_coal,
 248                          struct netlink_ext_ack *extack)
 249{
 250        struct gfar_private *priv = netdev_priv(dev);
 251        struct gfar_priv_rx_q *rx_queue = NULL;
 252        struct gfar_priv_tx_q *tx_queue = NULL;
 253        unsigned long rxtime;
 254        unsigned long rxcount;
 255        unsigned long txtime;
 256        unsigned long txcount;
 257
 258        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 259                return -EOPNOTSUPP;
 260
 261        if (!dev->phydev)
 262                return -ENODEV;
 263
 264        rx_queue = priv->rx_queue[0];
 265        tx_queue = priv->tx_queue[0];
 266
 267        rxtime  = get_ictt_value(rx_queue->rxic);
 268        rxcount = get_icft_value(rx_queue->rxic);
 269        txtime  = get_ictt_value(tx_queue->txic);
 270        txcount = get_icft_value(tx_queue->txic);
 271        cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
 272        cvals->rx_max_coalesced_frames = rxcount;
 273
 274        cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
 275        cvals->tx_max_coalesced_frames = txcount;
 276
 277        return 0;
 278}
 279
 280/* Change the coalescing values.
 281 * Both cvals->*_usecs and cvals->*_frames have to be > 0
 282 * in order for coalescing to be active
 283 */
 284static int gfar_scoalesce(struct net_device *dev,
 285                          struct ethtool_coalesce *cvals,
 286                          struct kernel_ethtool_coalesce *kernel_coal,
 287                          struct netlink_ext_ack *extack)
 288{
 289        struct gfar_private *priv = netdev_priv(dev);
 290        int i, err = 0;
 291
 292        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
 293                return -EOPNOTSUPP;
 294
 295        if (!dev->phydev)
 296                return -ENODEV;
 297
 298        /* Check the bounds of the values */
 299        if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
 300                netdev_info(dev, "Coalescing is limited to %d microseconds\n",
 301                            GFAR_MAX_COAL_USECS);
 302                return -EINVAL;
 303        }
 304
 305        if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
 306                netdev_info(dev, "Coalescing is limited to %d frames\n",
 307                            GFAR_MAX_COAL_FRAMES);
 308                return -EINVAL;
 309        }
 310
 311        /* Check the bounds of the values */
 312        if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
 313                netdev_info(dev, "Coalescing is limited to %d microseconds\n",
 314                            GFAR_MAX_COAL_USECS);
 315                return -EINVAL;
 316        }
 317
 318        if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
 319                netdev_info(dev, "Coalescing is limited to %d frames\n",
 320                            GFAR_MAX_COAL_FRAMES);
 321                return -EINVAL;
 322        }
 323
 324        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
 325                cpu_relax();
 326
 327        /* Set up rx coalescing */
 328        if ((cvals->rx_coalesce_usecs == 0) ||
 329            (cvals->rx_max_coalesced_frames == 0)) {
 330                for (i = 0; i < priv->num_rx_queues; i++)
 331                        priv->rx_queue[i]->rxcoalescing = 0;
 332        } else {
 333                for (i = 0; i < priv->num_rx_queues; i++)
 334                        priv->rx_queue[i]->rxcoalescing = 1;
 335        }
 336
 337        for (i = 0; i < priv->num_rx_queues; i++) {
 338                priv->rx_queue[i]->rxic = mk_ic_value(
 339                        cvals->rx_max_coalesced_frames,
 340                        gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
 341        }
 342
 343        /* Set up tx coalescing */
 344        if ((cvals->tx_coalesce_usecs == 0) ||
 345            (cvals->tx_max_coalesced_frames == 0)) {
 346                for (i = 0; i < priv->num_tx_queues; i++)
 347                        priv->tx_queue[i]->txcoalescing = 0;
 348        } else {
 349                for (i = 0; i < priv->num_tx_queues; i++)
 350                        priv->tx_queue[i]->txcoalescing = 1;
 351        }
 352
 353        for (i = 0; i < priv->num_tx_queues; i++) {
 354                priv->tx_queue[i]->txic = mk_ic_value(
 355                        cvals->tx_max_coalesced_frames,
 356                        gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
 357        }
 358
 359        if (dev->flags & IFF_UP) {
 360                stop_gfar(dev);
 361                err = startup_gfar(dev);
 362        } else {
 363                gfar_mac_reset(priv);
 364        }
 365
 366        clear_bit_unlock(GFAR_RESETTING, &priv->state);
 367
 368        return err;
 369}
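    /* Both the usecs and the frames threshold must be non-zero for
     * coalescing to be armed on a queue; setting either to 0 disables it.
     * A typical request, for illustration (placeholder interface name):
     *
     *   # ethtool -C eth0 rx-usecs 30 rx-frames 16 tx-usecs 30 tx-frames 16
     */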
 370
 371/* Fills in rvals with the current ring parameters.  Currently,
 372 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
 373 * jumbo are ignored by the driver */
 374static void gfar_gringparam(struct net_device *dev,
 375                            struct ethtool_ringparam *rvals)
 376{
 377        struct gfar_private *priv = netdev_priv(dev);
 378        struct gfar_priv_tx_q *tx_queue = NULL;
 379        struct gfar_priv_rx_q *rx_queue = NULL;
 380
 381        tx_queue = priv->tx_queue[0];
 382        rx_queue = priv->rx_queue[0];
 383
 384        rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
 385        rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
 386        rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
 387        rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
 388
 389        /* Values changeable by the user.  The valid values are
 390         * in the range 1 to the "*_max_pending" counterpart above.
 391         */
 392        rvals->rx_pending = rx_queue->rx_ring_size;
 393        rvals->rx_mini_pending = rx_queue->rx_ring_size;
 394        rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
 395        rvals->tx_pending = tx_queue->tx_ring_size;
 396}
 397
 398/* Change the current ring parameters, stopping the controller if
 399 * necessary so that we don't mess things up while we're in motion.
 400 */
 401static int gfar_sringparam(struct net_device *dev,
 402                           struct ethtool_ringparam *rvals)
 403{
 404        struct gfar_private *priv = netdev_priv(dev);
 405        int err = 0, i;
 406
 407        if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
 408                return -EINVAL;
 409
 410        if (!is_power_of_2(rvals->rx_pending)) {
 411                netdev_err(dev, "Ring sizes must be a power of 2\n");
 412                return -EINVAL;
 413        }
 414
 415        if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
 416                return -EINVAL;
 417
 418        if (!is_power_of_2(rvals->tx_pending)) {
 419                netdev_err(dev, "Ring sizes must be a power of 2\n");
 420                return -EINVAL;
 421        }
 422
 423        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
 424                cpu_relax();
 425
 426        if (dev->flags & IFF_UP)
 427                stop_gfar(dev);
 428
 429        /* Change the sizes */
 430        for (i = 0; i < priv->num_rx_queues; i++)
 431                priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
 432
 433        for (i = 0; i < priv->num_tx_queues; i++)
 434                priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
 435
 436        /* Rebuild the rings with the new size */
 437        if (dev->flags & IFF_UP)
 438                err = startup_gfar(dev);
 439
 440        clear_bit_unlock(GFAR_RESETTING, &priv->state);
 441
 442        return err;
 443}
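    /* Ring sizes must be powers of two and are capped at
     * GFAR_RX_MAX_RING_SIZE / GFAR_TX_MAX_RING_SIZE; resizing restarts the
     * interface if it is up.  For illustration (placeholder interface name):
     *
     *   # ethtool -G eth0 rx 256 tx 256
     */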
 444
 445static void gfar_gpauseparam(struct net_device *dev,
 446                             struct ethtool_pauseparam *epause)
 447{
 448        struct gfar_private *priv = netdev_priv(dev);
 449
 450        epause->autoneg = !!priv->pause_aneg_en;
 451        epause->rx_pause = !!priv->rx_pause_en;
 452        epause->tx_pause = !!priv->tx_pause_en;
 453}
 454
 455static int gfar_spauseparam(struct net_device *dev,
 456                            struct ethtool_pauseparam *epause)
 457{
 458        struct gfar_private *priv = netdev_priv(dev);
 459        struct phy_device *phydev = dev->phydev;
 460        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 461
 462        if (!phydev)
 463                return -ENODEV;
 464
 465        if (!phy_validate_pause(phydev, epause))
 466                return -EINVAL;
 467
 468        priv->rx_pause_en = priv->tx_pause_en = 0;
 469        phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
 470        if (epause->rx_pause) {
 471                priv->rx_pause_en = 1;
 472
 473                if (epause->tx_pause) {
 474                        priv->tx_pause_en = 1;
 475                }
 476        } else if (epause->tx_pause) {
 477                priv->tx_pause_en = 1;
 478        }
 479
 480        if (epause->autoneg)
 481                priv->pause_aneg_en = 1;
 482        else
 483                priv->pause_aneg_en = 0;
 484
 485        if (!epause->autoneg) {
 486                u32 tempval = gfar_read(&regs->maccfg1);
 487
 488                tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
 489
 490                priv->tx_actual_en = 0;
 491                if (priv->tx_pause_en) {
 492                        priv->tx_actual_en = 1;
 493                        tempval |= MACCFG1_TX_FLOW;
 494                }
 495
 496                if (priv->rx_pause_en)
 497                        tempval |= MACCFG1_RX_FLOW;
 498                gfar_write(&regs->maccfg1, tempval);
 499        }
 500
 501        return 0;
 502}
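    /* With pause autonegotiation disabled, the MACCFG1 flow-control bits are
     * programmed directly from the requested settings.  For illustration
     * (placeholder interface name):
     *
     *   # ethtool -A eth0 autoneg off rx on tx on
     */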
 503
 504int gfar_set_features(struct net_device *dev, netdev_features_t features)
 505{
 506        netdev_features_t changed = dev->features ^ features;
 507        struct gfar_private *priv = netdev_priv(dev);
 508        int err = 0;
 509
 510        if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 511                         NETIF_F_RXCSUM)))
 512                return 0;
 513
 514        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
 515                cpu_relax();
 516
 517        dev->features = features;
 518
 519        if (dev->flags & IFF_UP) {
 520                /* Now we take down the rings to rebuild them */
 521                stop_gfar(dev);
 522                err = startup_gfar(dev);
 523        } else {
 524                gfar_mac_reset(priv);
 525        }
 526
 527        clear_bit_unlock(GFAR_RESETTING, &priv->state);
 528
 529        return err;
 530}
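    /* Toggling RX checksum or VLAN tag offload therefore restarts the
     * interface (or just resets the MAC if it is down), e.g., for
     * illustration (placeholder interface name):
     *
     *   # ethtool -K eth0 rx off rxvlan off
     */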
 531
 532static uint32_t gfar_get_msglevel(struct net_device *dev)
 533{
 534        struct gfar_private *priv = netdev_priv(dev);
 535
 536        return priv->msg_enable;
 537}
 538
 539static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
 540{
 541        struct gfar_private *priv = netdev_priv(dev);
 542
 543        priv->msg_enable = data;
 544}
 545
 546#ifdef CONFIG_PM
 547static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 548{
 549        struct gfar_private *priv = netdev_priv(dev);
 550
 551        wol->supported = 0;
 552        wol->wolopts = 0;
 553
 554        if (priv->wol_supported & GFAR_WOL_MAGIC)
 555                wol->supported |= WAKE_MAGIC;
 556
 557        if (priv->wol_supported & GFAR_WOL_FILER_UCAST)
 558                wol->supported |= WAKE_UCAST;
 559
 560        if (priv->wol_opts & GFAR_WOL_MAGIC)
 561                wol->wolopts |= WAKE_MAGIC;
 562
 563        if (priv->wol_opts & GFAR_WOL_FILER_UCAST)
 564                wol->wolopts |= WAKE_UCAST;
 565}
 566
 567static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 568{
 569        struct gfar_private *priv = netdev_priv(dev);
 570        u16 wol_opts = 0;
 571        int err;
 572
 573        if (!priv->wol_supported && wol->wolopts)
 574                return -EINVAL;
 575
 576        if (wol->wolopts & ~(WAKE_MAGIC | WAKE_UCAST))
 577                return -EINVAL;
 578
 579        if (wol->wolopts & WAKE_MAGIC) {
 580                wol_opts |= GFAR_WOL_MAGIC;
 581        } else {
 582                if (wol->wolopts & WAKE_UCAST)
 583                        wol_opts |= GFAR_WOL_FILER_UCAST;
 584        }
 585
 586        wol_opts &= priv->wol_supported;
 587        priv->wol_opts = 0;
 588
 589        err = device_set_wakeup_enable(priv->dev, wol_opts);
 590        if (err)
 591                return err;
 592
 593        priv->wol_opts = wol_opts;
 594
 595        return 0;
 596}
 597#endif
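    /* When the platform advertises the matching wake sources, Wake-on-LAN is
     * selected through the standard ethtool interface, for illustration
     * (placeholder interface name):
     *
     *   # ethtool -s eth0 wol g    (magic packet, GFAR_WOL_MAGIC)
     *   # ethtool -s eth0 wol u    (unicast filer match, GFAR_WOL_FILER_UCAST)
     *   # ethtool -s eth0 wol d    (disabled)
     */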
 598
 599static void ethflow_to_filer_rules(struct gfar_private *priv, u64 ethflow)
 600{
 601        u32 fcr = 0x0, fpr = FPR_FILER_MASK;
 602
 603        if (ethflow & RXH_L2DA) {
 604                fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
 605                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
 606                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 607                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 608                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 609                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 610
 611                fcr = RQFCR_PID_DAL | RQFCR_CMP_NOMATCH |
 612                      RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
 613                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 614                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 615                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 616                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 617        }
 618
 619        if (ethflow & RXH_VLAN) {
 620                fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 621                      RQFCR_AND | RQFCR_HASHTBL_0;
 622                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 623                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 624                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 625                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 626        }
 627
 628        if (ethflow & RXH_IP_SRC) {
 629                fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 630                      RQFCR_AND | RQFCR_HASHTBL_0;
 631                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 632                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 633                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 634                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 635        }
 636
 637        if (ethflow & (RXH_IP_DST)) {
 638                fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 639                      RQFCR_AND | RQFCR_HASHTBL_0;
 640                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 641                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 642                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 643                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 644        }
 645
 646        if (ethflow & RXH_L3_PROTO) {
 647                fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 648                      RQFCR_AND | RQFCR_HASHTBL_0;
 649                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 650                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 651                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 652                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 653        }
 654
 655        if (ethflow & RXH_L4_B_0_1) {
 656                fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 657                      RQFCR_AND | RQFCR_HASHTBL_0;
 658                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 659                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 660                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 661                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 662        }
 663
 664        if (ethflow & RXH_L4_B_2_3) {
 665                fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
 666                      RQFCR_AND | RQFCR_HASHTBL_0;
 667                priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
 668                priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
 669                gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
 670                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 671        }
 672}
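    /* The RXH_* bits above come straight from the user's rx-flow-hash
     * request; each selected field becomes one hash-table filer rule.  For
     * illustration (placeholder interface name), hashing TCP/IPv4 flows on
     * source/destination address and ports:
     *
     *   # ethtool -N eth0 rx-flow-hash tcp4 sdfn
     */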
 673
 674static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
 675                                       u64 class)
 676{
 677        unsigned int cmp_rqfpr;
 678        unsigned int *local_rqfpr;
 679        unsigned int *local_rqfcr;
 680        int i = 0x0, k = 0x0;
 681        int j = MAX_FILER_IDX, l = 0x0;
 682        int ret = 1;
 683
 684        local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
 685                                    GFP_KERNEL);
 686        local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
 687                                    GFP_KERNEL);
 688        if (!local_rqfpr || !local_rqfcr) {
 689                ret = 0;
 690                goto err;
 691        }
 692
 693        switch (class) {
 694        case TCP_V4_FLOW:
 695                cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
 696                break;
 697        case UDP_V4_FLOW:
 698                cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
 699                break;
 700        case TCP_V6_FLOW:
 701                cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
 702                break;
 703        case UDP_V6_FLOW:
 704                cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
 705                break;
 706        default:
 707                netdev_err(priv->ndev,
 708                           "Right now this class is not supported\n");
 709                ret = 0;
 710                goto err;
 711        }
 712
 713        for (i = 0; i < MAX_FILER_IDX + 1; i++) {
 714                local_rqfpr[j] = priv->ftp_rqfpr[i];
 715                local_rqfcr[j] = priv->ftp_rqfcr[i];
 716                j--;
 717                if ((priv->ftp_rqfcr[i] ==
 718                     (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
 719                    (priv->ftp_rqfpr[i] == cmp_rqfpr))
 720                        break;
 721        }
 722
 723        if (i == MAX_FILER_IDX + 1) {
 724                netdev_err(priv->ndev,
 725                           "No parse rule found, can't create hash rules\n");
 726                ret = 0;
 727                goto err;
 728        }
 729
 730        /* A match marks the start of a cluster rule; if one was already
 731         * programmed, we need to overwrite those rules
 732         */
 733        for (l = i+1; l < MAX_FILER_IDX; l++) {
 734                if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
 735                    !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
 736                        priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
 737                                             RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
 738                        priv->ftp_rqfpr[l] = FPR_FILER_MASK;
 739                        gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
 740                                         priv->ftp_rqfpr[l]);
 741                        break;
 742                }
 743
 744                if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
 745                        (priv->ftp_rqfcr[l] & RQFCR_AND))
 746                        continue;
 747                else {
 748                        local_rqfpr[j] = priv->ftp_rqfpr[l];
 749                        local_rqfcr[j] = priv->ftp_rqfcr[l];
 750                        j--;
 751                }
 752        }
 753
 754        priv->cur_filer_idx = l - 1;
 755
 756        /* hash rules */
 757        ethflow_to_filer_rules(priv, ethflow);
 758
 759        /* Write back the popped out rules again */
 760        for (k = j+1; k < MAX_FILER_IDX; k++) {
 761                priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
 762                priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
 763                gfar_write_filer(priv, priv->cur_filer_idx,
 764                                 local_rqfcr[k], local_rqfpr[k]);
 765                if (!priv->cur_filer_idx)
 766                        break;
 767                priv->cur_filer_idx = priv->cur_filer_idx - 1;
 768        }
 769
 770err:
 771        kfree(local_rqfcr);
 772        kfree(local_rqfpr);
 773        return ret;
 774}
 775
 776static int gfar_set_hash_opts(struct gfar_private *priv,
 777                              struct ethtool_rxnfc *cmd)
 778{
 779        /* write the filer rules here */
 780        if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
 781                return -EINVAL;
 782
 783        return 0;
 784}
 785
 786static int gfar_check_filer_hardware(struct gfar_private *priv)
 787{
 788        struct gfar __iomem *regs = priv->gfargrp[0].regs;
 789        u32 i;
 790
 791        /* Check if we are in FIFO mode */
 792        i = gfar_read(&regs->ecntrl);
 793        i &= ECNTRL_FIFM;
 794        if (i == ECNTRL_FIFM) {
 795                netdev_notice(priv->ndev, "Interface in FIFO mode\n");
 796                i = gfar_read(&regs->rctrl);
 797                i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
 798                if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
 799                        netdev_info(priv->ndev,
 800                                    "Receive Queue Filtering enabled\n");
 801                } else {
 802                        netdev_warn(priv->ndev,
 803                                    "Receive Queue Filtering disabled\n");
 804                        return -EOPNOTSUPP;
 805                }
 806        }
 807        /* Or in standard mode */
 808        else {
 809                i = gfar_read(&regs->rctrl);
 810                i &= RCTRL_PRSDEP_MASK;
 811                if (i == RCTRL_PRSDEP_MASK) {
 812                        netdev_info(priv->ndev,
 813                                    "Receive Queue Filtering enabled\n");
 814                } else {
 815                        netdev_warn(priv->ndev,
 816                                    "Receive Queue Filtering disabled\n");
 817                        return -EOPNOTSUPP;
 818                }
 819        }
 820
 821        /* Set the properties for the arbitrary filer rule
 822         * to the first 4 Layer 4 bytes
 823         */
 824        gfar_write(&regs->rbifx, 0xC0C1C2C3);
 825        return 0;
 826}
 827
 828/* Write a mask to filer cache */
 829static void gfar_set_mask(u32 mask, struct filer_table *tab)
 830{
 831        tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
 832        tab->fe[tab->index].prop = mask;
 833        tab->index++;
 834}
 835
 836/* Sets parse bits (e.g. IP or TCP) */
 837static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
 838{
 839        gfar_set_mask(mask, tab);
 840        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
 841                                   RQFCR_AND;
 842        tab->fe[tab->index].prop = value;
 843        tab->index++;
 844}
 845
 846static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
 847                                       struct filer_table *tab)
 848{
 849        gfar_set_mask(mask, tab);
 850        tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
 851        tab->fe[tab->index].prop = value;
 852        tab->index++;
 853}
 854
 855/* Set a tuple of value and mask of the type given by flag
 856 * Example:
 857 * IP-Src = 10.0.0.0/255.0.0.0
 858 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
 859 *
 860 * Ethtool gives us value=0 and mask=~0 for a don't-care tuple;
 861 * for a don't-care mask it gives us 0.
 862 *
 863 * The don't-care check and the mask adjustment for mask=0 are done for the
 864 * VLAN and MAC fields on an upper level (the information is missing here).
 865 * Those entries can be discarded when both value=0 and mask=0.
 866 *
 867 * Furthermore, all masks are one-padded for better hardware efficiency.
 868 */
 869static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
 870                               struct filer_table *tab)
 871{
 872        switch (flag) {
 873                /* 3bit */
 874        case RQFCR_PID_PRI:
 875                if (!(value | mask))
 876                        return;
 877                mask |= RQFCR_PID_PRI_MASK;
 878                break;
 879                /* 8bit */
 880        case RQFCR_PID_L4P:
 881        case RQFCR_PID_TOS:
 882                if (!~(mask | RQFCR_PID_L4P_MASK))
 883                        return;
 884                if (!mask)
 885                        mask = ~0;
 886                else
 887                        mask |= RQFCR_PID_L4P_MASK;
 888                break;
 889                /* 12bit */
 890        case RQFCR_PID_VID:
 891                if (!(value | mask))
 892                        return;
 893                mask |= RQFCR_PID_VID_MASK;
 894                break;
 895                /* 16bit */
 896        case RQFCR_PID_DPT:
 897        case RQFCR_PID_SPT:
 898        case RQFCR_PID_ETY:
 899                if (!~(mask | RQFCR_PID_PORT_MASK))
 900                        return;
 901                if (!mask)
 902                        mask = ~0;
 903                else
 904                        mask |= RQFCR_PID_PORT_MASK;
 905                break;
 906                /* 24bit */
 907        case RQFCR_PID_DAH:
 908        case RQFCR_PID_DAL:
 909        case RQFCR_PID_SAH:
 910        case RQFCR_PID_SAL:
 911                if (!(value | mask))
 912                        return;
 913                mask |= RQFCR_PID_MAC_MASK;
 914                break;
 915                /* for all real 32bit masks */
 916        default:
 917                if (!~mask)
 918                        return;
 919                if (!mask)
 920                        mask = ~0;
 921                break;
 922        }
 923        gfar_set_general_attribute(value, mask, flag, tab);
 924}
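    /* Example of the one-padding above: a 12-bit VLAN ID match with value
     * 100 (0x064) and mask 0xFFF has the bits above the VID field forced to
     * one via RQFCR_PID_VID_MASK, so the hardware comparison only covers the
     * bits that are meaningful for RQFCR_PID_VID.
     */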
 925
 926/* Translates value and mask for UDP, TCP or SCTP */
 927static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
 928                              struct ethtool_tcpip4_spec *mask,
 929                              struct filer_table *tab)
 930{
 931        gfar_set_attribute(be32_to_cpu(value->ip4src),
 932                           be32_to_cpu(mask->ip4src),
 933                           RQFCR_PID_SIA, tab);
 934        gfar_set_attribute(be32_to_cpu(value->ip4dst),
 935                           be32_to_cpu(mask->ip4dst),
 936                           RQFCR_PID_DIA, tab);
 937        gfar_set_attribute(be16_to_cpu(value->pdst),
 938                           be16_to_cpu(mask->pdst),
 939                           RQFCR_PID_DPT, tab);
 940        gfar_set_attribute(be16_to_cpu(value->psrc),
 941                           be16_to_cpu(mask->psrc),
 942                           RQFCR_PID_SPT, tab);
 943        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
 944}
 945
 946/* Translates value and mask for RAW-IP4 */
 947static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
 948                             struct ethtool_usrip4_spec *mask,
 949                             struct filer_table *tab)
 950{
 951        gfar_set_attribute(be32_to_cpu(value->ip4src),
 952                           be32_to_cpu(mask->ip4src),
 953                           RQFCR_PID_SIA, tab);
 954        gfar_set_attribute(be32_to_cpu(value->ip4dst),
 955                           be32_to_cpu(mask->ip4dst),
 956                           RQFCR_PID_DIA, tab);
 957        gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
 958        gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
 959        gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
 960                           be32_to_cpu(mask->l4_4_bytes),
 961                           RQFCR_PID_ARB, tab);
 962
 963}
 964
 965/* Translates value and mask for ETHER spec */
 966static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
 967                           struct filer_table *tab)
 968{
 969        u32 upper_temp_mask = 0;
 970        u32 lower_temp_mask = 0;
 971
 972        /* Source address */
 973        if (!is_broadcast_ether_addr(mask->h_source)) {
 974                if (is_zero_ether_addr(mask->h_source)) {
 975                        upper_temp_mask = 0xFFFFFFFF;
 976                        lower_temp_mask = 0xFFFFFFFF;
 977                } else {
 978                        upper_temp_mask = mask->h_source[0] << 16 |
 979                                          mask->h_source[1] << 8  |
 980                                          mask->h_source[2];
 981                        lower_temp_mask = mask->h_source[3] << 16 |
 982                                          mask->h_source[4] << 8  |
 983                                          mask->h_source[5];
 984                }
 985                /* Upper 24bit */
 986                gfar_set_attribute(value->h_source[0] << 16 |
 987                                   value->h_source[1] << 8  |
 988                                   value->h_source[2],
 989                                   upper_temp_mask, RQFCR_PID_SAH, tab);
 990                /* And the same for the lower part */
 991                gfar_set_attribute(value->h_source[3] << 16 |
 992                                   value->h_source[4] << 8  |
 993                                   value->h_source[5],
 994                                   lower_temp_mask, RQFCR_PID_SAL, tab);
 995        }
 996        /* Destination address */
 997        if (!is_broadcast_ether_addr(mask->h_dest)) {
 998                /* Special case: the destination is the limited broadcast address */
 999                if ((is_broadcast_ether_addr(value->h_dest) &&
1000                    is_zero_ether_addr(mask->h_dest))) {
1001                        gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
1002                } else {
1003                        if (is_zero_ether_addr(mask->h_dest)) {
1004                                upper_temp_mask = 0xFFFFFFFF;
1005                                lower_temp_mask = 0xFFFFFFFF;
1006                        } else {
1007                                upper_temp_mask = mask->h_dest[0] << 16 |
1008                                                  mask->h_dest[1] << 8  |
1009                                                  mask->h_dest[2];
1010                                lower_temp_mask = mask->h_dest[3] << 16 |
1011                                                  mask->h_dest[4] << 8  |
1012                                                  mask->h_dest[5];
1013                        }
1014
1015                        /* Upper 24bit */
1016                        gfar_set_attribute(value->h_dest[0] << 16 |
1017                                           value->h_dest[1] << 8  |
1018                                           value->h_dest[2],
1019                                           upper_temp_mask, RQFCR_PID_DAH, tab);
1020                        /* And the same for the lower part */
1021                        gfar_set_attribute(value->h_dest[3] << 16 |
1022                                           value->h_dest[4] << 8  |
1023                                           value->h_dest[5],
1024                                           lower_temp_mask, RQFCR_PID_DAL, tab);
1025                }
1026        }
1027
1028        gfar_set_attribute(be16_to_cpu(value->h_proto),
1029                           be16_to_cpu(mask->h_proto),
1030                           RQFCR_PID_ETY, tab);
1031}
1032
1033static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
1034{
1035        return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
1036}
1037
1038static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
1039{
1040        return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
1041}
1042
1043static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
1044{
1045        return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
1046}
1047
1048static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
1049{
1050        return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
1051}
1052
1053static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
1054{
1055        return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
1056                VLAN_PRIO_SHIFT;
1057}
1058
1059static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
1060{
1061        return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
1062                VLAN_PRIO_SHIFT;
1063}
1064
1065/* Convert a rule to binary filter format of gianfar */
1066static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1067                                 struct filer_table *tab)
1068{
1069        u32 vlan = 0, vlan_mask = 0;
1070        u32 id = 0, id_mask = 0;
1071        u32 cfi = 0, cfi_mask = 0;
1072        u32 prio = 0, prio_mask = 0;
1073        u32 old_index = tab->index;
1074
1075        /* Check if vlan is wanted */
1076        if ((rule->flow_type & FLOW_EXT) &&
1077            (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
1078                if (!rule->m_ext.vlan_tci)
1079                        rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);
1080
1081                vlan = RQFPR_VLN;
1082                vlan_mask = RQFPR_VLN;
1083
1084                /* Separate the fields */
1085                id = vlan_tci_vid(rule);
1086                id_mask = vlan_tci_vidm(rule);
1087                cfi = vlan_tci_cfi(rule);
1088                cfi_mask = vlan_tci_cfim(rule);
1089                prio = vlan_tci_prio(rule);
1090                prio_mask = vlan_tci_priom(rule);
1091
1092                if (cfi_mask) {
1093                        if (cfi)
1094                                vlan |= RQFPR_CFI;
1095                        vlan_mask |= RQFPR_CFI;
1096                }
1097        }
1098
1099        switch (rule->flow_type & ~FLOW_EXT) {
1100        case TCP_V4_FLOW:
1101                gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
1102                                    RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
1103                gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
1104                                  &rule->m_u.tcp_ip4_spec, tab);
1105                break;
1106        case UDP_V4_FLOW:
1107                gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
1108                                    RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
1109                gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
1110                                  &rule->m_u.udp_ip4_spec, tab);
1111                break;
1112        case SCTP_V4_FLOW:
1113                gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1114                                    tab);
1115                gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
1116                gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
1117                                  (struct ethtool_tcpip4_spec *)&rule->m_u,
1118                                  tab);
1119                break;
1120        case IP_USER_FLOW:
1121                gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1122                                    tab);
1123                gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
1124                                 (struct ethtool_usrip4_spec *) &rule->m_u,
1125                                 tab);
1126                break;
1127        case ETHER_FLOW:
1128                if (vlan)
1129                        gfar_set_parse_bits(vlan, vlan_mask, tab);
1130                gfar_set_ether((struct ethhdr *) &rule->h_u,
1131                               (struct ethhdr *) &rule->m_u, tab);
1132                break;
1133        default:
1134                return -1;
1135        }
1136
1137        /* Set the vlan attributes in the end */
1138        if (vlan) {
1139                gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
1140                gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
1141        }
1142
1143        /* If there has been nothing written till now, it must be a default */
1144        if (tab->index == old_index) {
1145                gfar_set_mask(0xFFFFFFFF, tab);
1146                tab->fe[tab->index].ctrl = 0x20;
1147                tab->fe[tab->index].prop = 0x0;
1148                tab->index++;
1149        }
1150
1151        /* Remove last AND */
1152        tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
1153
1154        /* Specify which queue to use or to drop */
1155        if (rule->ring_cookie == RX_CLS_FLOW_DISC)
1156                tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
1157        else
1158                tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
1159
1160        /* Only big enough entries can be clustered */
1161        if (tab->index > (old_index + 2)) {
1162                tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
1163                tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
1164        }
1165
1166        /* In rare cases the cache can be full while there is
1167         * free space in hw
1168         */
1169        if (tab->index > MAX_FILER_CACHE_IDX - 1)
1170                return -EBUSY;
1171
1172        return 0;
1173}
1174
1175/* Write the bit-pattern from software's buffer to hardware registers */
1176static int gfar_write_filer_table(struct gfar_private *priv,
1177                                  struct filer_table *tab)
1178{
1179        u32 i = 0;
1180        if (tab->index > MAX_FILER_IDX - 1)
1181                return -EBUSY;
1182
1183        /* Fill regular entries */
1184        for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
1185                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1186        /* Fill the rest with fall-throughs */
1187        for (; i < MAX_FILER_IDX; i++)
1188                gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1189        /* Last entry must be default accept
1190         * because that's what people expect
1191         */
1192        gfar_write_filer(priv, i, 0x20, 0x0);
1193
1194        return 0;
1195}
1196
1197static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
1198                                 struct gfar_private *priv)
1199{
1200
1201        if (flow->flow_type & FLOW_EXT) {
1202                if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
1203                        netdev_warn(priv->ndev,
1204                                    "User-specific data not supported!\n");
1205                if (~flow->m_ext.vlan_etype)
1206                        netdev_warn(priv->ndev,
1207                                    "VLAN-etype not supported!\n");
1208        }
1209        if (flow->flow_type == IP_USER_FLOW)
1210                if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
1211                        netdev_warn(priv->ndev,
1212                                    "IP-Version differing from IPv4 not supported!\n");
1213
1214        return 0;
1215}
1216
1217static int gfar_process_filer_changes(struct gfar_private *priv)
1218{
1219        struct ethtool_flow_spec_container *j;
1220        struct filer_table *tab;
1221        s32 ret = 0;
1222
1223        /* Use kzalloc so that the index is set to zero, too */
1224        tab = kzalloc(sizeof(*tab), GFP_KERNEL);
1225        if (tab == NULL)
1226                return -ENOMEM;
1227
1228        /* Now convert the existing filer data from flow_spec into
1229         * filer tables binary format
1230         */
1231        list_for_each_entry(j, &priv->rx_list.list, list) {
1232                ret = gfar_convert_to_filer(&j->fs, tab);
1233                if (ret == -EBUSY) {
1234                        netdev_err(priv->ndev,
1235                                   "Rule not added: No free space!\n");
1236                        goto end;
1237                }
1238                if (ret == -1) {
1239                        netdev_err(priv->ndev,
1240                                   "Rule not added: Unsupported Flow-type!\n");
1241                        goto end;
1242                }
1243        }
1244
1245        /* Write everything to hardware */
1246        ret = gfar_write_filer_table(priv, tab);
1247        if (ret == -EBUSY) {
1248                netdev_err(priv->ndev, "Rule not added: No free space!\n");
1249                goto end;
1250        }
1251
1252end:
1253        kfree(tab);
1254        return ret;
1255}
1256
1257static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
1258{
1259        u32 i = 0;
1260
1261        for (i = 0; i < sizeof(flow->m_u); i++)
1262                flow->m_u.hdata[i] ^= 0xFF;
1263
1264        flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
1265        flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
1266        flow->m_ext.data[0] ^= cpu_to_be32(~0);
1267        flow->m_ext.data[1] ^= cpu_to_be32(~0);
1268}
1269
1270static int gfar_add_cls(struct gfar_private *priv,
1271                        struct ethtool_rx_flow_spec *flow)
1272{
1273        struct ethtool_flow_spec_container *temp, *comp;
1274        int ret = 0;
1275
1276        temp = kmalloc(sizeof(*temp), GFP_KERNEL);
1277        if (temp == NULL)
1278                return -ENOMEM;
1279        memcpy(&temp->fs, flow, sizeof(temp->fs));
1280
1281        gfar_invert_masks(&temp->fs);
1282        ret = gfar_check_capability(&temp->fs, priv);
1283        if (ret)
1284                goto clean_mem;
1285        /* Link in the new element at the right @location */
1286        if (list_empty(&priv->rx_list.list)) {
1287                ret = gfar_check_filer_hardware(priv);
1288                if (ret != 0)
1289                        goto clean_mem;
1290                list_add(&temp->list, &priv->rx_list.list);
1291                goto process;
1292        } else {
1293                list_for_each_entry(comp, &priv->rx_list.list, list) {
1294                        if (comp->fs.location > flow->location) {
1295                                list_add_tail(&temp->list, &comp->list);
1296                                goto process;
1297                        }
1298                        if (comp->fs.location == flow->location) {
1299                                netdev_err(priv->ndev,
1300                                           "Rule not added: ID %d not free!\n",
1301                                           flow->location);
1302                                ret = -EBUSY;
1303                                goto clean_mem;
1304                        }
1305                }
1306                list_add_tail(&temp->list, &priv->rx_list.list);
1307        }
1308
1309process:
1310        priv->rx_list.count++;
1311        ret = gfar_process_filer_changes(priv);
1312        if (ret)
1313                goto clean_list;
1314        return ret;
1315
1316clean_list:
1317        priv->rx_list.count--;
1318        list_del(&temp->list);
1319clean_mem:
1320        kfree(temp);
1321        return ret;
1322}
1323
1324static int gfar_del_cls(struct gfar_private *priv, u32 loc)
1325{
1326        struct ethtool_flow_spec_container *comp;
1327        int ret = -EINVAL;
1328
1329        if (list_empty(&priv->rx_list.list))
1330                return ret;
1331
1332        list_for_each_entry(comp, &priv->rx_list.list, list) {
1333                if (comp->fs.location == loc) {
1334                        list_del(&comp->list);
1335                        kfree(comp);
1336                        priv->rx_list.count--;
1337                        gfar_process_filer_changes(priv);
1338                        ret = 0;
1339                        break;
1340                }
1341        }
1342
1343        return ret;
1344}
1345
1346static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
1347{
1348        struct ethtool_flow_spec_container *comp;
1349        int ret = -EINVAL;
1350
1351        list_for_each_entry(comp, &priv->rx_list.list, list) {
1352                if (comp->fs.location == cmd->fs.location) {
1353                        memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
1354                        gfar_invert_masks(&cmd->fs);
1355                        ret = 0;
1356                        break;
1357                }
1358        }
1359
1360        return ret;
1361}
1362
1363static int gfar_get_cls_all(struct gfar_private *priv,
1364                            struct ethtool_rxnfc *cmd, u32 *rule_locs)
1365{
1366        struct ethtool_flow_spec_container *comp;
1367        u32 i = 0;
1368
1369        list_for_each_entry(comp, &priv->rx_list.list, list) {
1370                if (i == cmd->rule_cnt)
1371                        return -EMSGSIZE;
1372                rule_locs[i] = comp->fs.location;
1373                i++;
1374        }
1375
1376        cmd->data = MAX_FILER_IDX;
1377        cmd->rule_cnt = i;
1378
1379        return 0;
1380}
1381
1382static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1383{
1384        struct gfar_private *priv = netdev_priv(dev);
1385        int ret = 0;
1386
1387        if (test_bit(GFAR_RESETTING, &priv->state))
1388                return -EBUSY;
1389
1390        mutex_lock(&priv->rx_queue_access);
1391
1392        switch (cmd->cmd) {
1393        case ETHTOOL_SRXFH:
1394                ret = gfar_set_hash_opts(priv, cmd);
1395                break;
1396        case ETHTOOL_SRXCLSRLINS:
1397                if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
1398                     cmd->fs.ring_cookie >= priv->num_rx_queues) ||
1399                    cmd->fs.location >= MAX_FILER_IDX) {
1400                        ret = -EINVAL;
1401                        break;
1402                }
1403                ret = gfar_add_cls(priv, &cmd->fs);
1404                break;
1405        case ETHTOOL_SRXCLSRLDEL:
1406                ret = gfar_del_cls(priv, cmd->fs.location);
1407                break;
1408        default:
1409                ret = -EINVAL;
1410        }
1411
1412        mutex_unlock(&priv->rx_queue_access);
1413
1414        return ret;
1415}
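    /* Classification rules are managed through the standard ethtool NFC
     * interface; the action must name an existing RX queue (or -1 to drop)
     * and the location must be below MAX_FILER_IDX.  For illustration
     * (placeholder interface name and values):
     *
     *   # ethtool -N eth0 flow-type tcp4 dst-port 80 action 1 loc 10
     *   # ethtool -N eth0 delete 10
     */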
1416
1417static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1418                        u32 *rule_locs)
1419{
1420        struct gfar_private *priv = netdev_priv(dev);
1421        int ret = 0;
1422
1423        switch (cmd->cmd) {
1424        case ETHTOOL_GRXRINGS:
1425                cmd->data = priv->num_rx_queues;
1426                break;
1427        case ETHTOOL_GRXCLSRLCNT:
1428                cmd->rule_cnt = priv->rx_list.count;
1429                break;
1430        case ETHTOOL_GRXCLSRULE:
1431                ret = gfar_get_cls(priv, cmd);
1432                break;
1433        case ETHTOOL_GRXCLSRLALL:
1434                ret = gfar_get_cls_all(priv, cmd, rule_locs);
1435                break;
1436        default:
1437                ret = -EINVAL;
1438                break;
1439        }
1440
1441        return ret;
1442}
1443
1444static int gfar_get_ts_info(struct net_device *dev,
1445                            struct ethtool_ts_info *info)
1446{
1447        struct gfar_private *priv = netdev_priv(dev);
1448        struct platform_device *ptp_dev;
1449        struct device_node *ptp_node;
1450        struct ptp_qoriq *ptp = NULL;
1451
1452        info->phc_index = -1;
1453
1454        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
1455                info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
1456                                        SOF_TIMESTAMPING_SOFTWARE;
1457                return 0;
1458        }
1459
1460        ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
1461        if (ptp_node) {
1462                ptp_dev = of_find_device_by_node(ptp_node);
1463                if (ptp_dev)
1464                        ptp = platform_get_drvdata(ptp_dev);
1465        }
1466
1467        if (ptp)
1468                info->phc_index = ptp->phc_index;
1469
1470        info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1471                                SOF_TIMESTAMPING_RX_HARDWARE |
1472                                SOF_TIMESTAMPING_RAW_HARDWARE;
1473        info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1474                         (1 << HWTSTAMP_TX_ON);
1475        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1476                           (1 << HWTSTAMP_FILTER_ALL);
1477        return 0;
1478}
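    /* "ethtool -T" reports these capabilities; the PHC index is taken from
     * the eTSEC PTP clock ("fsl,etsec-ptp") when one is present, otherwise
     * only software timestamping is advertised.  For illustration
     * (placeholder interface name):
     *
     *   # ethtool -T eth0
     */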
1479
1480const struct ethtool_ops gfar_ethtool_ops = {
1481        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
1482                                     ETHTOOL_COALESCE_MAX_FRAMES,
1483        .get_drvinfo = gfar_gdrvinfo,
1484        .get_regs_len = gfar_reglen,
1485        .get_regs = gfar_get_regs,
1486        .get_link = ethtool_op_get_link,
1487        .get_coalesce = gfar_gcoalesce,
1488        .set_coalesce = gfar_scoalesce,
1489        .get_ringparam = gfar_gringparam,
1490        .set_ringparam = gfar_sringparam,
1491        .get_pauseparam = gfar_gpauseparam,
1492        .set_pauseparam = gfar_spauseparam,
1493        .get_strings = gfar_gstrings,
1494        .get_sset_count = gfar_sset_count,
1495        .get_ethtool_stats = gfar_fill_stats,
1496        .get_msglevel = gfar_get_msglevel,
1497        .set_msglevel = gfar_set_msglevel,
1498#ifdef CONFIG_PM
1499        .get_wol = gfar_get_wol,
1500        .set_wol = gfar_set_wol,
1501#endif
1502        .set_rxnfc = gfar_set_nfc,
1503        .get_rxnfc = gfar_get_nfc,
1504        .get_ts_info = gfar_get_ts_info,
1505        .get_link_ksettings = phy_ethtool_get_link_ksettings,
1506        .set_link_ksettings = phy_ethtool_set_link_ksettings,
1507};
1508