linux/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 */

#include <linux/net_tstamp.h>
#include <linux/nospec.h>

#include "dpni.h"	/* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"

/* To be kept in sync with DPNI statistics */
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
	"[hw] rx frames",
	"[hw] rx bytes",
	"[hw] rx mcast frames",
	"[hw] rx mcast bytes",
	"[hw] rx bcast frames",
	"[hw] rx bcast bytes",
	"[hw] tx frames",
	"[hw] tx bytes",
	"[hw] tx mcast frames",
	"[hw] tx mcast bytes",
	"[hw] tx bcast frames",
	"[hw] tx bcast bytes",
	"[hw] rx filtered frames",
	"[hw] rx discarded frames",
	"[hw] rx nobuffer discards",
	"[hw] tx discarded frames",
	"[hw] tx confirmed frames",
	"[hw] tx dequeued bytes",
	"[hw] tx dequeued frames",
	"[hw] tx rejected bytes",
	"[hw] tx rejected frames",
	"[hw] tx pending frames",
};

#define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)

static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
	/* per-cpu stats */
	"[drv] tx conf frames",
	"[drv] tx conf bytes",
	"[drv] tx sg frames",
	"[drv] tx sg bytes",
	"[drv] tx realloc frames",
	"[drv] rx sg frames",
	"[drv] rx sg bytes",
	"[drv] enqueue portal busy",
	/* Channel stats */
	"[drv] dequeue portal busy",
	"[drv] channel pull errors",
	"[drv] cdan",
	"[drv] xdp drop",
	"[drv] xdp tx",
	"[drv] xdp tx errors",
	"[drv] xdp redirect",
	/* FQ stats */
	"[qbman] rx pending frames",
	"[qbman] rx pending bytes",
	"[qbman] tx conf pending frames",
	"[qbman] tx conf pending bytes",
	"[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)

static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (priv->mac)
		return phylink_ethtool_nway_reset(priv->mac->phylink);

	return -EOPNOTSUPP;
}

static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
			     struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (priv->mac)
		return phylink_ethtool_ksettings_get(priv->mac->phylink,
						     link_settings);

	link_settings->base.autoneg = AUTONEG_DISABLE;
	if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
		link_settings->base.duplex = DUPLEX_FULL;
	link_settings->base.speed = priv->link_state.rate;

	return 0;
}

static int
dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
			     const struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (!priv->mac)
		return -EOPNOTSUPP;

	return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
}

static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
				     struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 link_options = priv->link_state.options;

	if (priv->mac) {
		phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
		return;
	}

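	/* Recover the ethtool pause settings from the DPNI link options:
	 * PAUSE holds the Rx pause state and ASYM_PAUSE flags that Rx and Tx
	 * differ, so Tx pause is PAUSE ^ ASYM_PAUSE (the inverse of the
	 * mapping used in dpaa2_eth_set_pauseparam()).
	 */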
	pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
	pause->tx_pause = pause->rx_pause ^
			  !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
	pause->autoneg = AUTONEG_DISABLE;
}

static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
				    struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_link_cfg cfg = {0};
	int err;

	if (!dpaa2_eth_has_pause_support(priv)) {
		netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
			    DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
		return -EOPNOTSUPP;
	}

	if (priv->mac)
		return phylink_ethtool_set_pauseparam(priv->mac->phylink,
						      pause);
	if (pause->autoneg)
		return -EOPNOTSUPP;

	cfg.rate = priv->link_state.rate;
	cfg.options = priv->link_state.options;
	if (pause->rx_pause)
		cfg.options |= DPNI_LINK_OPT_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
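	/* ASYM_PAUSE marks an asymmetric configuration: set it only when the
	 * requested Rx and Tx pause settings differ.
	 */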
	if (!!pause->rx_pause ^ !!pause->tx_pause)
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;

	if (cfg.options == priv->link_state.options)
		return 0;

	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err) {
		netdev_err(net_dev, "dpni_set_link_cfg() failed\n");
		return err;
	}

	priv->link_state.options = cfg.options;

	return 0;
}

static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
				  u8 *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
			strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
			strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (priv->mac)
			dpaa2_mac_get_strings(p);
		break;
	}
}

static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
	int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	switch (sset) {
	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
		if (priv->mac)
			num_ss_stats += dpaa2_mac_get_sset_count();
		return num_ss_stats;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fill in hardware counters, as returned by MC */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
{
	int i = 0;
	int j, k, err;
	int num_cnt;
	union dpni_statistics dpni_stats;
	u32 fcnt, bcnt;
	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
	u32 buf_cnt;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_drv_stats *extras;
	struct dpaa2_eth_ch_stats *ch_stats;
	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
		sizeof(dpni_stats.page_0),
		sizeof(dpni_stats.page_1),
		sizeof(dpni_stats.page_2),
		sizeof(dpni_stats.page_3),
		sizeof(dpni_stats.page_4),
		sizeof(dpni_stats.page_5),
		sizeof(dpni_stats.page_6),
	};

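	/* Zero only the part of the array filled in by this function; MAC
	 * counters, if present, are appended separately at the end.
	 */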
	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

	/* Print standard counters, from DPNI statistics */
	for (j = 0; j <= 6; j++) {
		/* We're not interested in pages 4 & 5 for now */
		if (j == 4 || j == 5)
			continue;
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					  j, &dpni_stats);
		if (err == -EINVAL)
			/* Older firmware versions don't support all pages */
			memset(&dpni_stats, 0, sizeof(dpni_stats));
		else if (err)
			netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);

		num_cnt = dpni_stats_page_size[j] / sizeof(u64);
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}

	/* Print per-cpu extra stats */
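	/* dpaa2_eth_drv_stats and dpaa2_eth_ch_stats hold only u64 counters,
	 * so each structure can be accumulated field by field as a flat
	 * array of u64s.
	 */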
	for_each_online_cpu(k) {
		extras = per_cpu_ptr(priv->percpu_extras, k);
		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
	}
	i += j;

	/* Per-channel stats */
	for (k = 0; k < priv->num_channels; k++) {
		ch_stats = &priv->channel[k]->stats;
		for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
	}
	i += j;

	for (j = 0; j < priv->num_fqs; j++) {
		/* Print FQ instantaneous counts */
		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
					      &fcnt, &bcnt);
		if (err) {
			netdev_warn(net_dev, "FQ query error %d\n", err);
			return;
		}

		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
			fcnt_tx_total += fcnt;
			bcnt_tx_total += bcnt;
		} else {
			fcnt_rx_total += fcnt;
			bcnt_rx_total += bcnt;
		}
	}

	*(data + i++) = fcnt_rx_total;
	*(data + i++) = bcnt_rx_total;
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;

	err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
	if (err) {
		netdev_warn(net_dev, "Buffer count query error %d\n", err);
		return;
	}
	*(data + i++) = buf_cnt;

	if (priv->mac)
		dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}

static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;

	if (eth_mask->h_proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
		*(__be16 *)(key + off) = eth_value->h_proto;
		*(__be16 *)(mask + off) = eth_mask->h_proto;
		*fields |= DPAA2_ETH_DIST_ETHTYPE;
	}

	if (!is_zero_ether_addr(eth_mask->h_source)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
		ether_addr_copy(key + off, eth_value->h_source);
		ether_addr_copy(mask + off, eth_mask->h_source);
		*fields |= DPAA2_ETH_DIST_ETHSRC;
	}

	if (!is_zero_ether_addr(eth_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, eth_value->h_dest);
		ether_addr_copy(mask + off, eth_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
			 struct ethtool_usrip4_spec *uip_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;
	u32 tmp_value, tmp_mask;

	if (uip_mask->tos || uip_mask->ip_ver)
		return -EOPNOTSUPP;

	if (uip_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = uip_value->ip4src;
		*(__be32 *)(mask + off) = uip_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (uip_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = uip_value->ip4dst;
		*(__be32 *)(mask + off) = uip_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (uip_mask->proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
		*(u8 *)(key + off) = uip_value->proto;
		*(u8 *)(mask + off) = uip_mask->proto;
		*fields |= DPAA2_ETH_DIST_IPPROTO;
	}

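	/* l4_4_bytes carries the first four bytes of the L4 header in
	 * big-endian order; interpret them as source port (upper 16 bits)
	 * followed by destination port (lower 16 bits).
	 */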
	if (uip_mask->l4_4_bytes) {
		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = htons(tmp_value >> 16);
		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
		*fields |= DPAA2_ETH_DIST_L4SRC;

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	return 0;
}

static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
			struct ethtool_tcpip4_spec *l4_mask,
			void *key, void *mask, u8 l4_proto, u64 *fields)
{
	int off;

	if (l4_mask->tos)
		return -EOPNOTSUPP;

	if (l4_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = l4_value->ip4src;
		*(__be32 *)(mask + off) = l4_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (l4_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = l4_value->ip4dst;
		*(__be32 *)(mask + off) = l4_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (l4_mask->psrc) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = l4_value->psrc;
		*(__be16 *)(mask + off) = l4_mask->psrc;
		*fields |= DPAA2_ETH_DIST_L4SRC;
	}

	if (l4_mask->pdst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = l4_value->pdst;
		*(__be16 *)(mask + off) = l4_mask->pdst;
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames with the specified L4 proto */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
	*(u8 *)(key + off) = l4_proto;
	*(u8 *)(mask + off) = 0xFF;
	*fields |= DPAA2_ETH_DIST_IPPROTO;

	return 0;
}

static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
			 struct ethtool_flow_ext *ext_mask,
			 void *key, void *mask, u64 *fields)
{
	int off;

	if (ext_mask->vlan_etype)
		return -EOPNOTSUPP;

	if (ext_mask->vlan_tci) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
		*(__be16 *)(key + off) = ext_value->vlan_tci;
		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
		*fields |= DPAA2_ETH_DIST_VLAN;
	}

	return 0;
}

static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
			     struct ethtool_flow_ext *ext_mask,
			     void *key, void *mask, u64 *fields)
{
	int off;

	if (!is_zero_ether_addr(ext_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, ext_value->h_dest);
		ether_addr_copy(mask + off, ext_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
			 u64 *fields)
{
	int err;
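
	/* The low byte of flow_type selects the base match type; FLOW_EXT and
	 * FLOW_MAC_EXT are flag bits requesting the extended fields handled
	 * below.
	 */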
	switch (fs->flow_type & 0xFF) {
	case ETHER_FLOW:
		err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
				    key, mask, fields);
		break;
	case IP_USER_FLOW:
		err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
				    &fs->m_u.usr_ip4_spec, key, mask, fields);
		break;
	case TCP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
				   key, mask, IPPROTO_TCP, fields);
		break;
	case UDP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
				   key, mask, IPPROTO_UDP, fields);
		break;
	case SCTP_V4_FLOW:
		err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
				   &fs->m_u.sctp_ip4_spec, key, mask,
				   IPPROTO_SCTP, fields);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (err)
		return err;

	if (fs->flow_type & FLOW_EXT) {
		err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
		if (err)
			return err;
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
					fields);
		if (err)
			return err;
	}

	return 0;
}

static int do_cls_rule(struct net_device *net_dev,
		       struct ethtool_rx_flow_spec *fs,
		       bool add)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpni_rule_cfg rule_cfg = { 0 };
	struct dpni_fs_action_cfg fs_act = { 0 };
	dma_addr_t key_iova;
	u64 fields = 0;
	void *key_buf;
	int err;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
		return -EINVAL;

	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

	/* allocate twice the key size, for the actual key and for mask */
	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;

	/* Fill the key and mask memory areas */
	err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
	if (err)
		goto free_mem;

	if (!dpaa2_eth_fs_mask_enabled(priv)) {
		/* Masking allows us to configure a maximal key during init and
		 * use it for all flow steering rules. Without it, we include
		 * in the key only the fields actually used, so we need to
		 * extract the others from the final key buffer.
		 *
		 * Program the FS key if needed, or return error if previously
		 * set key can't be used for the current rule. User needs to
		 * delete existing rules in this case to allow for the new one.
		 */
		if (!priv->rx_cls_fields) {
			err = dpaa2_eth_set_cls(net_dev, fields);
			if (err)
				goto free_mem;

			priv->rx_cls_fields = fields;
		} else if (priv->rx_cls_fields != fields) {
			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
			err = -EOPNOTSUPP;
			goto free_mem;
		}

		dpaa2_eth_cls_trim_rule(key_buf, fields);
		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
	}

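	/* Key and mask were laid out back to back in key_buf, so a single
	 * mapping covers both and the mask IOVA is key_iova plus the key
	 * size.
	 */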
	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	rule_cfg.key_iova = key_iova;
	if (dpaa2_eth_fs_mask_enabled(priv))
		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

	if (add) {
		if (fs->ring_cookie == RX_CLS_FLOW_DISC)
			fs_act.options |= DPNI_FS_OPT_DISCARD;
		else
			fs_act.flow_id = fs->ring_cookie;
		err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
					fs->location, &rule_cfg, &fs_act);
	} else {
		err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
					   &rule_cfg);
	}

	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
	kfree(key_buf);

	return err;
}

static int num_rules(struct dpaa2_eth_priv *priv)
{
	int i, rules = 0;

	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
		if (priv->cls_rules[i].in_use)
			rules++;

	return rules;
}

static int update_cls_rule(struct net_device *net_dev,
			   struct ethtool_rx_flow_spec *new_fs,
			   unsigned int location)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_cls_rule *rule;
	int err = -EINVAL;

	if (!priv->rx_cls_enabled)
		return -EOPNOTSUPP;

	if (location >= dpaa2_eth_fs_count(priv))
		return -EINVAL;

	rule = &priv->cls_rules[location];

	/* If a rule is present at the specified location, delete it. */
	if (rule->in_use) {
		err = do_cls_rule(net_dev, &rule->fs, false);
		if (err)
			return err;

		rule->in_use = 0;

		if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
			priv->rx_cls_fields = 0;
	}

	/* If no new entry to add, return here */
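	/* err is still -EINVAL at this point if nothing was installed at
	 * @location, so deleting an empty slot is reported as an error.
	 */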
	if (!new_fs)
		return err;

	err = do_cls_rule(net_dev, new_fs, true);
	if (err)
		return err;

	rule->in_use = 1;
	rule->fs = *new_fs;

	return 0;
}

static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int max_rules = dpaa2_eth_fs_count(priv);
	int i, j = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_GRXFH:
		/* we purposely ignore cmd->flow_type for now, because the
		 * classifier only supports a single set of fields for all
		 * protocols
		 */
		rxnfc->data = priv->rx_hash_fields;
		break;
	case ETHTOOL_GRXRINGS:
		rxnfc->data = dpaa2_eth_queue_count(priv);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rxnfc->rule_cnt = 0;
		rxnfc->rule_cnt = num_rules(priv);
		rxnfc->data = max_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		if (rxnfc->fs.location >= max_rules)
			return -EINVAL;
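		/* Clamp the user-supplied index under speculation before it
		 * is used to index the rules array.
		 */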
		rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
							max_rules);
		if (!priv->cls_rules[rxnfc->fs.location].in_use)
			return -EINVAL;
		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < max_rules; i++) {
			if (!priv->cls_rules[i].in_use)
				continue;
			if (j == rxnfc->rule_cnt)
				return -EMSGSIZE;
			rule_locs[j++] = i;
		}
		rxnfc->rule_cnt = j;
		rxnfc->data = max_rules;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc)
{
	int err = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_SRXFH:
		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
			return -EOPNOTSUPP;
		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
		break;
	case ETHTOOL_SRXCLSRLINS:
		err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

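/* PTP hardware clock index, shared with the DPAA2 PTP driver via this
 * exported symbol; -1 means no PTP clock is available.
 */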
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);

static int dpaa2_eth_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = dpaa2_phc_index;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);
	return 0;
}

const struct ethtool_ops dpaa2_ethtool_ops = {
	.get_drvinfo = dpaa2_eth_get_drvinfo,
	.nway_reset = dpaa2_eth_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
	.set_link_ksettings = dpaa2_eth_set_link_ksettings,
	.get_pauseparam = dpaa2_eth_get_pauseparam,
	.set_pauseparam = dpaa2_eth_set_pauseparam,
	.get_sset_count = dpaa2_eth_get_sset_count,
	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
	.get_strings = dpaa2_eth_get_strings,
	.get_rxnfc = dpaa2_eth_get_rxnfc,
	.set_rxnfc = dpaa2_eth_set_rxnfc,
	.get_ts_info = dpaa2_eth_get_ts_info,
};