linux/drivers/net/ethernet/google/gve/gve_ethtool.c
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"

static void gve_get_drvinfo(struct net_device *netdev,
                            struct ethtool_drvinfo *info)
{
        struct gve_priv *priv = netdev_priv(netdev);

        strlcpy(info->driver, "gve", sizeof(info->driver));
        strlcpy(info->version, gve_version_str, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
        struct gve_priv *priv = netdev_priv(netdev);

        priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
        struct gve_priv *priv = netdev_priv(netdev);

        return priv->msg_enable;
}

static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
        "rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
        "rx_dropped", "tx_dropped", "tx_timeouts",
        "rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
        "interface_up_cnt", "interface_down_cnt", "reset_cnt",
        "page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
        "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
        "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
        "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
        "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
        "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
        "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
        "adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
        "adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
        "adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
        "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
        "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
        "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
        "adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
        "report-stats",
};

#define GVE_MAIN_STATS_LEN  ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN  ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)

static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
        struct gve_priv *priv = netdev_priv(netdev);
        char *s = (char *)data;
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(s, *gve_gstrings_main_stats,
                       sizeof(gve_gstrings_main_stats));
                s += sizeof(gve_gstrings_main_stats);

                for (i = 0; i < priv->rx_cfg.num_queues; i++) {
                        for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
                                snprintf(s, ETH_GSTRING_LEN,
                                         gve_gstrings_rx_stats[j], i);
                                s += ETH_GSTRING_LEN;
                        }
                }

                for (i = 0; i < priv->tx_cfg.num_queues; i++) {
                        for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
                                snprintf(s, ETH_GSTRING_LEN,
                                         gve_gstrings_tx_stats[j], i);
                                s += ETH_GSTRING_LEN;
                        }
                }

                memcpy(s, *gve_gstrings_adminq_stats,
                       sizeof(gve_gstrings_adminq_stats));
                s += sizeof(gve_gstrings_adminq_stats);
                break;

        case ETH_SS_PRIV_FLAGS:
                memcpy(s, *gve_gstrings_priv_flags,
                       sizeof(gve_gstrings_priv_flags));
                s += sizeof(gve_gstrings_priv_flags);
                break;

        default:
                break;
        }
}

static int gve_get_sset_count(struct net_device *netdev, int sset)
{
        struct gve_priv *priv = netdev_priv(netdev);

        switch (sset) {
        case ETH_SS_STATS:
                return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
                       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
                       (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
        case ETH_SS_PRIV_FLAGS:
                return GVE_PRIV_FLAGS_STR_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

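/* Gather stats for ethtool -S: driver-maintained counters are read under
 * u64_stats retry loops and summed across rings, then per-queue NIC counters
 * are copied out of the shared stats report once the NIC has populated it.
 */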
static void
gve_get_ethtool_stats(struct net_device *netdev,
                      struct ethtool_stats *stats, u64 *data)
{
        u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
                tmp_rx_desc_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes;
        u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
                rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes;
        int stats_idx, base_stats_idx, max_stats_idx;
        struct stats *report_stats;
        int *rx_qid_to_stats_idx;
        int *tx_qid_to_stats_idx;
        struct gve_priv *priv;
        bool skip_nic_stats;
        unsigned int start;
        int ring;
        int i, j;

        ASSERT_RTNL();

        priv = netdev_priv(netdev);
        report_stats = priv->stats_report->stats;
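        /* Map each queue id to the index of that queue's NIC-written stats
         * block inside the stats report; filled by the preprocessing loops
         * below.
         */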
        rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
                                            sizeof(int), GFP_KERNEL);
        if (!rx_qid_to_stats_idx)
                return;
        tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
                                            sizeof(int), GFP_KERNEL);
        if (!tx_qid_to_stats_idx) {
                kfree(rx_qid_to_stats_idx);
                return;
        }
        for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
             rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
             ring < priv->rx_cfg.num_queues; ring++) {
                if (priv->rx) {
                        do {
                                struct gve_rx_ring *rx = &priv->rx[ring];

                                start =
                                  u64_stats_fetch_begin(&priv->rx[ring].statss);
                                tmp_rx_pkts = rx->rpackets;
                                tmp_rx_bytes = rx->rbytes;
                                tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
                                tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
                                tmp_rx_desc_err_dropped_pkt =
                                        rx->rx_desc_err_dropped_pkt;
                        } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
                                                       start));
                        rx_pkts += tmp_rx_pkts;
                        rx_bytes += tmp_rx_bytes;
                        rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
                        rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
                        rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
                }
        }
        for (tx_pkts = 0, tx_bytes = 0, ring = 0;
             ring < priv->tx_cfg.num_queues; ring++) {
                if (priv->tx) {
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->tx[ring].statss);
                                tmp_tx_pkts = priv->tx[ring].pkt_done;
                                tmp_tx_bytes = priv->tx[ring].bytes_done;
                        } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
                                                       start));
                        tx_pkts += tmp_tx_pkts;
                        tx_bytes += tmp_tx_bytes;
                }
        }

        i = 0;
        data[i++] = rx_pkts;
        data[i++] = tx_pkts;
        data[i++] = rx_bytes;
        data[i++] = tx_bytes;
        /* total rx dropped packets */
        data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
                    rx_desc_err_dropped_pkt;
        /* Skip tx_dropped */
        i++;

        data[i++] = priv->tx_timeo_cnt;
        data[i++] = rx_skb_alloc_fail;
        data[i++] = rx_buf_alloc_fail;
        data[i++] = rx_desc_err_dropped_pkt;
        data[i++] = priv->interface_up_cnt;
        data[i++] = priv->interface_down_cnt;
        data[i++] = priv->reset_cnt;
        data[i++] = priv->page_alloc_fail;
        data[i++] = priv->dma_mapping_error;
        data[i++] = priv->stats_report_trigger_cnt;
        i = GVE_MAIN_STATS_LEN;

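        /* Stats report layout: driver-reported tx and rx stats come first,
         * followed by the NIC-reported rx stats and then the NIC-reported tx
         * stats.  Each NIC entry is a {stat_name, queue_id, value} triple; a
         * zero stat_name means the NIC has not written that block yet.
         */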
        /* For rx cross-reporting stats, start from nic rx stats in report */
        base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
                GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
        max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
                base_stats_idx;
        /* Preprocess the stats report for rx, map queue id to start index */
        skip_nic_stats = false;
        for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
                stats_idx += NIC_RX_STATS_REPORT_NUM) {
                u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
                u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

                if (stat_name == 0) {
                        /* no stats written by NIC yet */
                        skip_nic_stats = true;
                        break;
                }
                rx_qid_to_stats_idx[queue_id] = stats_idx;
        }
        /* walk RX rings */
        if (priv->rx) {
                for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
                        struct gve_rx_ring *rx = &priv->rx[ring];

                        data[i++] = rx->fill_cnt;
                        data[i++] = rx->cnt;
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->rx[ring].statss);
                                tmp_rx_bytes = rx->rbytes;
                                tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
                                tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
                                tmp_rx_desc_err_dropped_pkt =
                                        rx->rx_desc_err_dropped_pkt;
                        } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
                                                       start));
                        data[i++] = tmp_rx_bytes;
                        /* rx dropped packets */
                        data[i++] = tmp_rx_skb_alloc_fail +
                                tmp_rx_buf_alloc_fail +
                                tmp_rx_desc_err_dropped_pkt;
                        data[i++] = rx->rx_copybreak_pkt;
                        data[i++] = rx->rx_copied_pkt;
                        /* stats from NIC */
                        if (skip_nic_stats) {
                                /* skip NIC rx stats */
                                i += NIC_RX_STATS_REPORT_NUM;
                                continue;
                        }
                        for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
                                u64 value =
                                be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);

                                data[i++] = value;
                        }
                }
        } else {
                i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
        }

        /* For tx cross-reporting stats, start from nic tx stats in report */
        base_stats_idx = max_stats_idx;
        max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
                max_stats_idx;
        /* Preprocess the stats report for tx, map queue id to start index */
        skip_nic_stats = false;
        for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
                stats_idx += NIC_TX_STATS_REPORT_NUM) {
                u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
                u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

                if (stat_name == 0) {
                        /* no stats written by NIC yet */
                        skip_nic_stats = true;
                        break;
                }
                tx_qid_to_stats_idx[queue_id] = stats_idx;
        }
        /* walk TX rings */
        if (priv->tx) {
                for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
                        struct gve_tx_ring *tx = &priv->tx[ring];

                        data[i++] = tx->req;
                        data[i++] = tx->done;
                        do {
                                start =
                                  u64_stats_fetch_begin(&priv->tx[ring].statss);
                                tmp_tx_bytes = tx->bytes_done;
                        } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
                                                       start));
                        data[i++] = tmp_tx_bytes;
                        data[i++] = tx->wake_queue;
                        data[i++] = tx->stop_queue;
                        data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
                                                                          tx));
                        /* stats from NIC */
                        if (skip_nic_stats) {
                                /* skip NIC tx stats */
                                i += NIC_TX_STATS_REPORT_NUM;
                                continue;
                        }
                        for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
                                u64 value =
                                be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);

                                data[i++] = value;
                        }
                }
        } else {
                i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
        }

        kfree(rx_qid_to_stats_idx);
        kfree(tx_qid_to_stats_idx);
        /* AQ Stats */
        data[i++] = priv->adminq_prod_cnt;
        data[i++] = priv->adminq_cmd_fail;
        data[i++] = priv->adminq_timeouts;
        data[i++] = priv->adminq_describe_device_cnt;
        data[i++] = priv->adminq_cfg_device_resources_cnt;
        data[i++] = priv->adminq_register_page_list_cnt;
        data[i++] = priv->adminq_unregister_page_list_cnt;
        data[i++] = priv->adminq_create_tx_queue_cnt;
        data[i++] = priv->adminq_create_rx_queue_cnt;
        data[i++] = priv->adminq_destroy_tx_queue_cnt;
        data[i++] = priv->adminq_destroy_rx_queue_cnt;
        data[i++] = priv->adminq_dcfg_device_resources_cnt;
        data[i++] = priv->adminq_set_driver_parameter_cnt;
        data[i++] = priv->adminq_report_stats_cnt;
        data[i++] = priv->adminq_report_link_speed_cnt;
}

static void gve_get_channels(struct net_device *netdev,
                             struct ethtool_channels *cmd)
{
        struct gve_priv *priv = netdev_priv(netdev);

        cmd->max_rx = priv->rx_cfg.max_queues;
        cmd->max_tx = priv->tx_cfg.max_queues;
        cmd->max_other = 0;
        cmd->max_combined = 0;
        cmd->rx_count = priv->rx_cfg.num_queues;
        cmd->tx_count = priv->tx_cfg.num_queues;
        cmd->other_count = 0;
        cmd->combined_count = 0;
}

static int gve_set_channels(struct net_device *netdev,
                            struct ethtool_channels *cmd)
{
        struct gve_priv *priv = netdev_priv(netdev);
        struct gve_queue_config new_tx_cfg = priv->tx_cfg;
        struct gve_queue_config new_rx_cfg = priv->rx_cfg;
        struct ethtool_channels old_settings;
        int new_tx = cmd->tx_count;
        int new_rx = cmd->rx_count;

        gve_get_channels(netdev, &old_settings);

        /* Changing combined is not allowed */
        if (cmd->combined_count != old_settings.combined_count)
                return -EINVAL;

        if (!new_rx || !new_tx)
                return -EINVAL;

        if (!netif_carrier_ok(netdev)) {
                priv->tx_cfg.num_queues = new_tx;
                priv->rx_cfg.num_queues = new_rx;
                return 0;
        }

        new_tx_cfg.num_queues = new_tx;
        new_rx_cfg.num_queues = new_rx;

        return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}

static void gve_get_ringparam(struct net_device *netdev,
                              struct ethtool_ringparam *cmd)
{
        struct gve_priv *priv = netdev_priv(netdev);

        cmd->rx_max_pending = priv->rx_desc_cnt;
        cmd->tx_max_pending = priv->tx_desc_cnt;
        cmd->rx_pending = priv->rx_desc_cnt;
        cmd->tx_pending = priv->tx_desc_cnt;
}

static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
        struct gve_priv *priv = netdev_priv(netdev);

        if (*flags == ETH_RESET_ALL) {
                *flags = 0;
                return gve_reset(priv, true);
        }

        return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
                           const struct ethtool_tunable *etuna, void *value)
{
        struct gve_priv *priv = netdev_priv(netdev);

        switch (etuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                *(u32 *)value = priv->rx_copybreak;
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int gve_set_tunable(struct net_device *netdev,
                           const struct ethtool_tunable *etuna,
                           const void *value)
{
        struct gve_priv *priv = netdev_priv(netdev);
        u32 len;

        switch (etuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                len = *(u32 *)value;
                if (len > PAGE_SIZE / 2)
                        return -EINVAL;
                priv->rx_copybreak = len;
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static u32 gve_get_priv_flags(struct net_device *netdev)
{
        struct gve_priv *priv = netdev_priv(netdev);
        u32 ret_flags = 0;

        /* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
        if (priv->ethtool_flags & BIT(0))
                ret_flags |= BIT(0);
        return ret_flags;
}

static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
        struct gve_priv *priv = netdev_priv(netdev);
        u64 ori_flags, new_flags;

        ori_flags = READ_ONCE(priv->ethtool_flags);
        new_flags = ori_flags;

        /* Only one priv flag exists: report-stats (BIT(0)) */
        if (flags & BIT(0))
                new_flags |= BIT(0);
        else
                new_flags &= ~(BIT(0));
        priv->ethtool_flags = new_flags;
        /* start report-stats timer when user turns report stats on. */
        if (flags & BIT(0)) {
                mod_timer(&priv->stats_report_timer,
                          round_jiffies(jiffies +
                                        msecs_to_jiffies(priv->stats_report_timer_period)));
        }
        /* Zero out gve stats when report-stats is turned off and delete the
         * report-stats timer.
         */
        if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
                int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
                        priv->tx_cfg.num_queues;
                int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
                        priv->rx_cfg.num_queues;

                memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
                                   sizeof(struct stats));
                del_timer_sync(&priv->stats_report_timer);
        }
        return 0;
}

static int gve_get_link_ksettings(struct net_device *netdev,
                                  struct ethtool_link_ksettings *cmd)
{
        struct gve_priv *priv = netdev_priv(netdev);
        int err = gve_adminq_report_link_speed(priv);

        cmd->base.speed = priv->link_speed;
        return err;
}

const struct ethtool_ops gve_ethtool_ops = {
        .get_drvinfo = gve_get_drvinfo,
        .get_strings = gve_get_strings,
        .get_sset_count = gve_get_sset_count,
        .get_ethtool_stats = gve_get_ethtool_stats,
        .set_msglevel = gve_set_msglevel,
        .get_msglevel = gve_get_msglevel,
        .set_channels = gve_set_channels,
        .get_channels = gve_get_channels,
        .get_link = ethtool_op_get_link,
        .get_ringparam = gve_get_ringparam,
        .reset = gve_user_reset,
        .get_tunable = gve_get_tunable,
        .set_tunable = gve_set_tunable,
        .get_priv_flags = gve_get_priv_flags,
        .set_priv_flags = gve_set_priv_flags,
        .get_link_ksettings = gve_get_link_ksettings
};