linux/drivers/net/ethernet/sfc/ef10.c
   1/****************************************************************************
   2 * Driver for Solarflare network controllers and boards
   3 * Copyright 2012-2013 Solarflare Communications Inc.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms of the GNU General Public License version 2 as published
   7 * by the Free Software Foundation, incorporated herein by reference.
   8 */
   9
  10#include "net_driver.h"
  11#include "ef10_regs.h"
  12#include "io.h"
  13#include "mcdi.h"
  14#include "mcdi_pcol.h"
  15#include "nic.h"
  16#include "workarounds.h"
  17#include "selftest.h"
  18#include <linux/in.h>
  19#include <linux/jhash.h>
  20#include <linux/wait.h>
  21#include <linux/workqueue.h>
  22
  23/* Hardware control for EF10 architecture including 'Huntington'. */
  24
  25#define EFX_EF10_DRVGEN_EV              7
  26enum {
  27        EFX_EF10_TEST = 1,
  28        EFX_EF10_REFILL,
  29};
  30
  31/* The reserved RSS context value */
  32#define EFX_EF10_RSS_CONTEXT_INVALID    0xffffffff
  33
  34/* The filter table(s) are managed by firmware and we have write-only
  35 * access.  When removing filters we must identify them to the
  36 * firmware by a 64-bit handle, but this is too wide for Linux kernel
  37 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
  38 * be able to tell in advance whether a requested insertion will
  39 * replace an existing filter.  Therefore we maintain a software hash
  40 * table, which should be at least as large as the hardware hash
  41 * table.
  42 *
  43 * Huntington has a single 8K filter table shared between all filter
  44 * types and both ports.
  45 */
  46#define HUNT_FILTER_TBL_ROWS 8192
  47
  48struct efx_ef10_filter_table {
  49/* The RX match field masks supported by this fw & hw, in order of priority */
  50        enum efx_filter_match_flags rx_match_flags[
  51                MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
  52        unsigned int rx_match_count;
  53
  54        struct {
  55                unsigned long spec;     /* pointer to spec plus flag bits */
  56/* BUSY flag indicates that an update is in progress.  AUTO_OLD is
  57 * used to mark and sweep MAC filters for the device address lists.
  58 */
  59#define EFX_EF10_FILTER_FLAG_BUSY       1UL
  60#define EFX_EF10_FILTER_FLAG_AUTO_OLD   2UL
  61#define EFX_EF10_FILTER_FLAGS           3UL
  62                u64 handle;             /* firmware handle */
  63        } *entry;
  64        wait_queue_head_t waitq;
  65/* Shadow of net_device address lists, guarded by mac_lock */
  66#define EFX_EF10_FILTER_DEV_UC_MAX      32
  67#define EFX_EF10_FILTER_DEV_MC_MAX      256
  68        struct {
  69                u8 addr[ETH_ALEN];
  70                u16 id;
  71        } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
  72          dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
  73        int dev_uc_count;               /* negative for PROMISC */
  74        int dev_mc_count;               /* negative for PROMISC/ALLMULTI */
  75};
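/* Illustrative sketch, not part of the driver logic: the 'spec' word in
 * struct efx_ef10_filter_table packs a pointer to a struct
 * efx_filter_spec together with the two flag bits defined above,
 * relying on the spec allocation being at least 4-byte aligned.
 * Helper names here are hypothetical; separating the two parts would
 * look roughly like this:
 *
 *        static struct efx_filter_spec *
 *        example_filter_spec(struct efx_ef10_filter_table *table,
 *                            unsigned int filter_idx)
 *        {
 *                return (struct efx_filter_spec *)
 *                        (table->entry[filter_idx].spec &
 *                         ~EFX_EF10_FILTER_FLAGS);
 *        }
 *
 *        static unsigned int
 *        example_filter_flags(struct efx_ef10_filter_table *table,
 *                             unsigned int filter_idx)
 *        {
 *                return table->entry[filter_idx].spec &
 *                       EFX_EF10_FILTER_FLAGS;
 *        }
 */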
  76
  77/* An arbitrary search limit for the software hash table */
  78#define EFX_EF10_FILTER_SEARCH_LIMIT 200
  79
  80static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
  81static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
  82static void efx_ef10_filter_table_remove(struct efx_nic *efx);
  83
  84static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
  85{
  86        efx_dword_t reg;
  87
  88        efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
  89        return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
  90                EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
  91}
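/* Not part of the driver logic: 0xb007 is presumably a "boot" magic in
 * EFX_WORD_1 marking the warm boot count in EFX_WORD_0 as valid, so a
 * negative return means "MC still rebooting".  Callers such as
 * efx_ef10_probe() below simply retry, roughly:
 *
 *        int tries = 0, rc;
 *
 *        do {
 *                rc = efx_ef10_get_warm_boot_count(efx);
 *                if (rc < 0)
 *                        ssleep(1);
 *        } while (rc < 0 && ++tries < 5);
 */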
  92
  93static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
  94{
  95        return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
  96}
  97
  98static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
  99{
 100        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
 101        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 102        size_t outlen;
 103        int rc;
 104
 105        BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
 106
 107        rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
 108                          outbuf, sizeof(outbuf), &outlen);
 109        if (rc)
 110                return rc;
 111        if (outlen < sizeof(outbuf)) {
 112                netif_err(efx, drv, efx->net_dev,
 113                          "unable to read datapath firmware capabilities\n");
 114                return -EIO;
 115        }
 116
 117        nic_data->datapath_caps =
 118                MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
 119
 120        if (!(nic_data->datapath_caps &
 121              (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
 122                netif_err(efx, drv, efx->net_dev,
 123                          "current firmware does not support TSO\n");
 124                return -ENODEV;
 125        }
 126
 127        if (!(nic_data->datapath_caps &
 128              (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
 129                netif_err(efx, probe, efx->net_dev,
 130                          "current firmware does not support an RX prefix\n");
 131                return -ENODEV;
 132        }
 133
 134        return 0;
 135}
 136
 137static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
 138{
 139        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
 140        int rc;
 141
 142        rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
 143                          outbuf, sizeof(outbuf), NULL);
 144        if (rc)
 145                return rc;
 146        rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
 147        return rc > 0 ? rc : -ERANGE;
 148}
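/* Worked example, not part of the driver logic: efx_ef10_probe() turns
 * this value into a timer quantum with 1536000 / rc nanoseconds, which
 * only makes sense if the frequency is reported in MHz.  Assuming an
 * 800 MHz system clock:
 *
 *        1536 cycles / 800 MHz = 1536000 / 800 ns = 1920 ns
 *
 * so one event-queue timer tick would be roughly 1.9 microseconds.
 */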
 149
 150static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
 151{
 152        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
 153        size_t outlen;
 154        int rc;
 155
 156        BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
 157
 158        rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
 159                          outbuf, sizeof(outbuf), &outlen);
 160        if (rc)
 161                return rc;
 162        if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
 163                return -EIO;
 164
 165        memcpy(mac_address,
 166               MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
 167        return 0;
 168}
 169
 170static int efx_ef10_probe(struct efx_nic *efx)
 171{
 172        struct efx_ef10_nic_data *nic_data;
 173        int i, rc;
 174
 175        /* We can have one VI for each 8K region.  However we need
 176         * multiple TX queues per channel.
 177         */
 178        efx->max_channels =
 179                min_t(unsigned int,
 180                      EFX_MAX_CHANNELS,
 181                      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
 182                      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
 183        BUG_ON(efx->max_channels == 0);
 184
 185        nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
 186        if (!nic_data)
 187                return -ENOMEM;
 188        efx->nic_data = nic_data;
 189
 190        rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
 191                                  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
 192        if (rc)
 193                goto fail1;
 194
 195        /* Get the MC's warm boot count.  In case it's rebooting right
 196         * now, be prepared to retry.
 197         */
 198        i = 0;
 199        for (;;) {
 200                rc = efx_ef10_get_warm_boot_count(efx);
 201                if (rc >= 0)
 202                        break;
 203                if (++i == 5)
 204                        goto fail2;
 205                ssleep(1);
 206        }
 207        nic_data->warm_boot_count = rc;
 208
 209        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 210
 211        /* In case we're recovering from a crash (kexec), we want to
 212         * cancel any outstanding request by the previous user of this
 213         * function.  We send a special message using the least
 214         * significant bits of the 'high' (doorbell) register.
 215         */
 216        _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
 217
 218        rc = efx_mcdi_init(efx);
 219        if (rc)
 220                goto fail2;
 221
 222        /* Reset (most) configuration for this function */
 223        rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
 224        if (rc)
 225                goto fail3;
 226
 227        /* Enable event logging */
 228        rc = efx_mcdi_log_ctrl(efx, true, false, 0);
 229        if (rc)
 230                goto fail3;
 231
 232        rc = efx_ef10_init_datapath_caps(efx);
 233        if (rc < 0)
 234                goto fail3;
 235
 236        efx->rx_packet_len_offset =
 237                ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
 238
 239        rc = efx_mcdi_port_get_number(efx);
 240        if (rc < 0)
 241                goto fail3;
 242        efx->port_num = rc;
 243
 244        rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
 245        if (rc)
 246                goto fail3;
 247
 248        rc = efx_ef10_get_sysclk_freq(efx);
 249        if (rc < 0)
 250                goto fail3;
 251        efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
 252
 253        /* Check whether firmware supports bug 35388 workaround */
 254        rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
 255        if (rc == 0)
 256                nic_data->workaround_35388 = true;
 257        else if (rc != -ENOSYS && rc != -ENOENT)
 258                goto fail3;
 259        netif_dbg(efx, probe, efx->net_dev,
 260                  "workaround for bug 35388 is %sabled\n",
 261                  nic_data->workaround_35388 ? "en" : "dis");
 262
 263        rc = efx_mcdi_mon_probe(efx);
 264        if (rc)
 265                goto fail3;
 266
 267        efx_ptp_probe(efx, NULL);
 268
 269        return 0;
 270
 271fail3:
 272        efx_mcdi_fini(efx);
 273fail2:
 274        efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
 275fail1:
 276        kfree(nic_data);
 277        efx->nic_data = NULL;
 278        return rc;
 279}
 280
 281static int efx_ef10_free_vis(struct efx_nic *efx)
 282{
 283        MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
 284        size_t outlen;
 285        int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
 286                                    outbuf, sizeof(outbuf), &outlen);
 287
 288        /* -EALREADY means nothing to free, so ignore */
 289        if (rc == -EALREADY)
 290                rc = 0;
 291        if (rc)
 292                efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
 293                                       rc);
 294        return rc;
 295}
 296
 297#ifdef EFX_USE_PIO
 298
 299static void efx_ef10_free_piobufs(struct efx_nic *efx)
 300{
 301        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 302        MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
 303        unsigned int i;
 304        int rc;
 305
 306        BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
 307
 308        for (i = 0; i < nic_data->n_piobufs; i++) {
 309                MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
 310                               nic_data->piobuf_handle[i]);
 311                rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
 312                                  NULL, 0, NULL);
 313                WARN_ON(rc);
 314        }
 315
 316        nic_data->n_piobufs = 0;
 317}
 318
 319static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
 320{
 321        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 322        MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
 323        unsigned int i;
 324        size_t outlen;
 325        int rc = 0;
 326
 327        BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
 328
 329        for (i = 0; i < n; i++) {
 330                rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
 331                                  outbuf, sizeof(outbuf), &outlen);
 332                if (rc)
 333                        break;
 334                if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
 335                        rc = -EIO;
 336                        break;
 337                }
 338                nic_data->piobuf_handle[i] =
 339                        MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
 340                netif_dbg(efx, probe, efx->net_dev,
 341                          "allocated PIO buffer %u handle %x\n", i,
 342                          nic_data->piobuf_handle[i]);
 343        }
 344
 345        nic_data->n_piobufs = i;
 346        if (rc)
 347                efx_ef10_free_piobufs(efx);
 348        return rc;
 349}
 350
 351static int efx_ef10_link_piobufs(struct efx_nic *efx)
 352{
 353        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 354        MCDI_DECLARE_BUF(inbuf,
 355                         max(MC_CMD_LINK_PIOBUF_IN_LEN,
 356                             MC_CMD_UNLINK_PIOBUF_IN_LEN));
 357        struct efx_channel *channel;
 358        struct efx_tx_queue *tx_queue;
 359        unsigned int offset, index;
 360        int rc;
 361
 362        BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
 363        BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
 364
 365        /* Link a buffer to each VI in the write-combining mapping */
 366        for (index = 0; index < nic_data->n_piobufs; ++index) {
 367                MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
 368                               nic_data->piobuf_handle[index]);
 369                MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
 370                               nic_data->pio_write_vi_base + index);
 371                rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
 372                                  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
 373                                  NULL, 0, NULL);
 374                if (rc) {
 375                        netif_err(efx, drv, efx->net_dev,
 376                                  "failed to link VI %u to PIO buffer %u (%d)\n",
 377                                  nic_data->pio_write_vi_base + index, index,
 378                                  rc);
 379                        goto fail;
 380                }
 381                netif_dbg(efx, probe, efx->net_dev,
 382                          "linked VI %u to PIO buffer %u\n",
 383                          nic_data->pio_write_vi_base + index, index);
 384        }
 385
 386        /* Link a buffer to each TX queue */
 387        efx_for_each_channel(channel, efx) {
 388                efx_for_each_channel_tx_queue(tx_queue, channel) {
 389                        /* We assign the PIO buffers to queues in
 390                         * reverse order to allow for the following
 391                         * special case.
 392                         */
 393                        offset = ((efx->tx_channel_offset + efx->n_tx_channels -
 394                                   tx_queue->channel->channel - 1) *
 395                                  efx_piobuf_size);
 396                        index = offset / ER_DZ_TX_PIOBUF_SIZE;
 397                        offset = offset % ER_DZ_TX_PIOBUF_SIZE;
 398
 399                        /* When the host page size is 4K, the first
 400                         * host page in the WC mapping may be within
 401                         * the same VI page as the last TX queue.  We
 402                         * can only link one buffer to each VI.
 403                         */
 404                        if (tx_queue->queue == nic_data->pio_write_vi_base) {
 405                                BUG_ON(index != 0);
 406                                rc = 0;
 407                        } else {
 408                                MCDI_SET_DWORD(inbuf,
 409                                               LINK_PIOBUF_IN_PIOBUF_HANDLE,
 410                                               nic_data->piobuf_handle[index]);
 411                                MCDI_SET_DWORD(inbuf,
 412                                               LINK_PIOBUF_IN_TXQ_INSTANCE,
 413                                               tx_queue->queue);
 414                                rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
 415                                                  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
 416                                                  NULL, 0, NULL);
 417                        }
 418
 419                        if (rc) {
 420                                /* This is non-fatal; the TX path just
 421                                 * won't use PIO for this queue
 422                                 */
 423                                netif_err(efx, drv, efx->net_dev,
 424                                          "failed to link VI %u to PIO buffer %u (%d)\n",
 425                                          tx_queue->queue, index, rc);
 426                                tx_queue->piobuf = NULL;
 427                        } else {
 428                                tx_queue->piobuf =
 429                                        nic_data->pio_write_base +
 430                                        index * EFX_VI_PAGE_SIZE + offset;
 431                                tx_queue->piobuf_offset = offset;
 432                                netif_dbg(efx, probe, efx->net_dev,
 433                                          "linked VI %u to PIO buffer %u offset %x addr %p\n",
 434                                          tx_queue->queue, index,
 435                                          tx_queue->piobuf_offset,
 436                                          tx_queue->piobuf);
 437                        }
 438                }
 439        }
 440
 441        return 0;
 442
 443fail:
 444        while (index--) {
 445                MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
 446                               nic_data->pio_write_vi_base + index);
 447                efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
 448                             inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
 449                             NULL, 0, NULL);
 450        }
 451        return rc;
 452}
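/* Worked example for the offset/index arithmetic above, not part of the
 * driver logic.  The constants are assumptions for illustration only:
 * efx_piobuf_size = 256 and ER_DZ_TX_PIOBUF_SIZE = 2048.  With
 * tx_channel_offset = 0 and n_tx_channels = 4:
 *
 *        channel 3: offset = (4 - 3 - 1) * 256 =   0 -> index 0, offset 0
 *        channel 2: offset = (4 - 2 - 1) * 256 = 256 -> index 0, offset 256
 *        channel 1: offset = (4 - 1 - 1) * 256 = 512 -> index 0, offset 512
 *        channel 0: offset = (4 - 0 - 1) * 256 = 768 -> index 0, offset 768
 *
 * i.e. all four channels share PIO buffer 0, carved up in reverse
 * channel order as the comment in the loop above describes.
 */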
 453
 454#else /* !EFX_USE_PIO */
 455
 456static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
 457{
 458        return n == 0 ? 0 : -ENOBUFS;
 459}
 460
 461static int efx_ef10_link_piobufs(struct efx_nic *efx)
 462{
 463        return 0;
 464}
 465
 466static void efx_ef10_free_piobufs(struct efx_nic *efx)
 467{
 468}
 469
 470#endif /* EFX_USE_PIO */
 471
 472static void efx_ef10_remove(struct efx_nic *efx)
 473{
 474        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 475        int rc;
 476
 477        efx_ptp_remove(efx);
 478
 479        efx_mcdi_mon_remove(efx);
 480
 481        efx_ef10_rx_free_indir_table(efx);
 482
 483        if (nic_data->wc_membase)
 484                iounmap(nic_data->wc_membase);
 485
 486        rc = efx_ef10_free_vis(efx);
 487        WARN_ON(rc != 0);
 488
 489        if (!nic_data->must_restore_piobufs)
 490                efx_ef10_free_piobufs(efx);
 491
 492        efx_mcdi_fini(efx);
 493        efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
 494        kfree(nic_data);
 495}
 496
 497static int efx_ef10_alloc_vis(struct efx_nic *efx,
 498                              unsigned int min_vis, unsigned int max_vis)
 499{
 500        MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
 501        MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
 502        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 503        size_t outlen;
 504        int rc;
 505
 506        MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
 507        MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
 508        rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
 509                          outbuf, sizeof(outbuf), &outlen);
 510        if (rc != 0)
 511                return rc;
 512
 513        if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
 514                return -EIO;
 515
  516        netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
 517                  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
 518
 519        nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
 520        nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
 521        return 0;
 522}
 523
 524/* Note that the failure path of this function does not free
 525 * resources, as this will be done by efx_ef10_remove().
 526 */
 527static int efx_ef10_dimension_resources(struct efx_nic *efx)
 528{
 529        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 530        unsigned int uc_mem_map_size, wc_mem_map_size;
 531        unsigned int min_vis, pio_write_vi_base, max_vis;
 532        void __iomem *membase;
 533        int rc;
 534
 535        min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
 536
 537#ifdef EFX_USE_PIO
 538        /* Try to allocate PIO buffers if wanted and if the full
 539         * number of PIO buffers would be sufficient to allocate one
 540         * copy-buffer per TX channel.  Failure is non-fatal, as there
 541         * are only a small number of PIO buffers shared between all
 542         * functions of the controller.
 543         */
 544        if (efx_piobuf_size != 0 &&
 545            ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
 546            efx->n_tx_channels) {
 547                unsigned int n_piobufs =
 548                        DIV_ROUND_UP(efx->n_tx_channels,
 549                                     ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
 550
 551                rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
 552                if (rc)
 553                        netif_err(efx, probe, efx->net_dev,
 554                                  "failed to allocate PIO buffers (%d)\n", rc);
 555                else
 556                        netif_dbg(efx, probe, efx->net_dev,
 557                                  "allocated %u PIO buffers\n", n_piobufs);
 558        }
 559#else
 560        nic_data->n_piobufs = 0;
 561#endif
 562
 563        /* PIO buffers should be mapped with write-combining enabled,
 564         * and we want to make single UC and WC mappings rather than
 565         * several of each (in fact that's the only option if host
 566         * page size is >4K).  So we may allocate some extra VIs just
 567         * for writing PIO buffers through.
 568         */
 569        uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
 570                                     ER_DZ_TX_PIOBUF);
 571        if (nic_data->n_piobufs) {
 572                pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
 573                wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
 574                                               nic_data->n_piobufs) *
 575                                              EFX_VI_PAGE_SIZE) -
 576                                   uc_mem_map_size);
 577                max_vis = pio_write_vi_base + nic_data->n_piobufs;
 578        } else {
 579                pio_write_vi_base = 0;
 580                wc_mem_map_size = 0;
 581                max_vis = min_vis;
 582        }
 583
 584        /* In case the last attached driver failed to free VIs, do it now */
 585        rc = efx_ef10_free_vis(efx);
 586        if (rc != 0)
 587                return rc;
 588
 589        rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
 590        if (rc != 0)
 591                return rc;
 592
 593        /* If we didn't get enough VIs to map all the PIO buffers, free the
 594         * PIO buffers
 595         */
 596        if (nic_data->n_piobufs &&
 597            nic_data->n_allocated_vis <
 598            pio_write_vi_base + nic_data->n_piobufs) {
 599                netif_dbg(efx, probe, efx->net_dev,
 600                          "%u VIs are not sufficient to map %u PIO buffers\n",
 601                          nic_data->n_allocated_vis, nic_data->n_piobufs);
 602                efx_ef10_free_piobufs(efx);
 603        }
 604
 605        /* Shrink the original UC mapping of the memory BAR */
 606        membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
 607        if (!membase) {
 608                netif_err(efx, probe, efx->net_dev,
 609                          "could not shrink memory BAR to %x\n",
 610                          uc_mem_map_size);
 611                return -ENOMEM;
 612        }
 613        iounmap(efx->membase);
 614        efx->membase = membase;
 615
 616        /* Set up the WC mapping if needed */
 617        if (wc_mem_map_size) {
 618                nic_data->wc_membase = ioremap_wc(efx->membase_phys +
 619                                                  uc_mem_map_size,
 620                                                  wc_mem_map_size);
 621                if (!nic_data->wc_membase) {
 622                        netif_err(efx, probe, efx->net_dev,
 623                                  "could not allocate WC mapping of size %x\n",
 624                                  wc_mem_map_size);
 625                        return -ENOMEM;
 626                }
 627                nic_data->pio_write_vi_base = pio_write_vi_base;
 628                nic_data->pio_write_base =
 629                        nic_data->wc_membase +
 630                        (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
 631                         uc_mem_map_size);
 632
 633                rc = efx_ef10_link_piobufs(efx);
 634                if (rc)
 635                        efx_ef10_free_piobufs(efx);
 636        }
 637
 638        netif_dbg(efx, probe, efx->net_dev,
 639                  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
 640                  &efx->membase_phys, efx->membase, uc_mem_map_size,
 641                  nic_data->wc_membase, wc_mem_map_size);
 642
 643        return 0;
 644}
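/* Worked example for the mapping arithmetic above, not part of the
 * driver logic.  All constants are assumptions for illustration:
 * PAGE_SIZE = 4096, EFX_VI_PAGE_SIZE = 8192, ER_DZ_TX_PIOBUF = 4096.
 * With min_vis = 8 and n_piobufs = 2:
 *
 *        uc_mem_map_size   = PAGE_ALIGN(7 * 8192 + 4096)         = 61440
 *        pio_write_vi_base = 61440 / 8192                        = 7
 *        wc_mem_map_size   = PAGE_ALIGN((7 + 2) * 8192) - 61440  = 12288
 *        max_vis           = 7 + 2                               = 9
 *
 * so the UC mapping covers VIs 0-6 plus the first half of VI 7's page,
 * and the WC mapping begins at VI 7's PIO aperture: the "first host
 * page in the WC mapping may be within the same VI page as the last
 * TX queue" case that efx_ef10_link_piobufs() handles.
 */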
 645
 646static int efx_ef10_init_nic(struct efx_nic *efx)
 647{
 648        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 649        int rc;
 650
 651        if (nic_data->must_check_datapath_caps) {
 652                rc = efx_ef10_init_datapath_caps(efx);
 653                if (rc)
 654                        return rc;
 655                nic_data->must_check_datapath_caps = false;
 656        }
 657
 658        if (nic_data->must_realloc_vis) {
 659                /* We cannot let the number of VIs change now */
 660                rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
 661                                        nic_data->n_allocated_vis);
 662                if (rc)
 663                        return rc;
 664                nic_data->must_realloc_vis = false;
 665        }
 666
 667        if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
 668                rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
 669                if (rc == 0) {
 670                        rc = efx_ef10_link_piobufs(efx);
 671                        if (rc)
 672                                efx_ef10_free_piobufs(efx);
 673                }
 674
 675                /* Log an error on failure, but this is non-fatal */
 676                if (rc)
 677                        netif_err(efx, drv, efx->net_dev,
 678                                  "failed to restore PIO buffers (%d)\n", rc);
 679                nic_data->must_restore_piobufs = false;
 680        }
 681
 682        efx_ef10_rx_push_rss_config(efx);
 683        return 0;
 684}
 685
 686static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
 687{
 688        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 689
 690        /* All our allocations have been reset */
 691        nic_data->must_realloc_vis = true;
 692        nic_data->must_restore_filters = true;
 693        nic_data->must_restore_piobufs = true;
 694        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 695}
 696
 697static int efx_ef10_map_reset_flags(u32 *flags)
 698{
 699        enum {
 700                EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
 701                                   ETH_RESET_SHARED_SHIFT),
 702                EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
 703                                  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
 704                                  ETH_RESET_PHY | ETH_RESET_MGMT) <<
 705                                 ETH_RESET_SHARED_SHIFT)
 706        };
 707
 708        /* We assume for now that our PCI function is permitted to
 709         * reset everything.
 710         */
 711
 712        if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
 713                *flags &= ~EF10_RESET_MC;
 714                return RESET_TYPE_WORLD;
 715        }
 716
 717        if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
 718                *flags &= ~EF10_RESET_PORT;
 719                return RESET_TYPE_ALL;
 720        }
 721
 722        /* no invisible reset implemented */
 723
 724        return -EINVAL;
 725}
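/* Usage sketch, not part of the driver logic: for example,
 * "ethtool --reset <dev> all" arrives here as flags = ETH_RESET_ALL,
 * whose shared-component bits are a superset of EF10_RESET_MC:
 *
 *        u32 flags = ETH_RESET_ALL;
 *        int type = efx_ef10_map_reset_flags(&flags);
 *
 * type is then RESET_TYPE_WORLD; the shared DMA/FILTER/OFFLOAD/MAC/PHY/
 * MGMT bits are cleared from flags and any remaining bits are left for
 * the caller to reject as unsupported.
 */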
 726
 727static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
 728{
 729        int rc = efx_mcdi_reset(efx, reset_type);
 730
 731        /* If it was a port reset, trigger reallocation of MC resources.
 732         * Note that on an MC reset nothing needs to be done now because we'll
 733         * detect the MC reset later and handle it then.
 734         */
 735        if (reset_type == RESET_TYPE_ALL && !rc)
 736                efx_ef10_reset_mc_allocations(efx);
 737        return rc;
 738}
 739
 740#define EF10_DMA_STAT(ext_name, mcdi_name)                      \
 741        [EF10_STAT_ ## ext_name] =                              \
 742        { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
 743#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)                \
 744        [EF10_STAT_ ## int_name] =                              \
 745        { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
 746#define EF10_OTHER_STAT(ext_name)                               \
 747        [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
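/* Expansion example, not part of the driver logic:
 * EF10_DMA_STAT(tx_bytes, TX_BYTES) in the table below expands to
 *
 *        [EF10_STAT_tx_bytes] = { "tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES }
 *
 * i.e. an ethtool-visible name, a 64-bit DMA counter and its byte
 * offset in the MC statistics buffer (the MC_CMD_MAC_* values index
 * 64-bit words, hence the "8 *").  EF10_DMA_INVIS_STAT() produces the
 * same layout with a NULL name so the counter is collected but not
 * exported, and EF10_OTHER_STAT() declares a software-derived statistic
 * with no DMA source.
 */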
 748
 749static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 750        EF10_DMA_STAT(tx_bytes, TX_BYTES),
 751        EF10_DMA_STAT(tx_packets, TX_PKTS),
 752        EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
 753        EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
 754        EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
 755        EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
 756        EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
 757        EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
 758        EF10_DMA_STAT(tx_64, TX_64_PKTS),
 759        EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
 760        EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
 761        EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
 762        EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
 763        EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
 764        EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
 765        EF10_DMA_STAT(rx_bytes, RX_BYTES),
 766        EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
 767        EF10_OTHER_STAT(rx_good_bytes),
 768        EF10_OTHER_STAT(rx_bad_bytes),
 769        EF10_DMA_STAT(rx_packets, RX_PKTS),
 770        EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
 771        EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
 772        EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
 773        EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
 774        EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
 775        EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
 776        EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
 777        EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
 778        EF10_DMA_STAT(rx_64, RX_64_PKTS),
 779        EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
 780        EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
 781        EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
 782        EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
 783        EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
 784        EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
 785        EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
 786        EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
 787        EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
 788        EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
 789        EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
 790        EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
 791        EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
 792        EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
 793        EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
 794        EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
 795        EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
 796        EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
 797        EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
 798        EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
 799        EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
 800        EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
 801        EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
 802        EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
 803};
 804
 805#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |           \
 806                               (1ULL << EF10_STAT_tx_packets) |         \
 807                               (1ULL << EF10_STAT_tx_pause) |           \
 808                               (1ULL << EF10_STAT_tx_unicast) |         \
 809                               (1ULL << EF10_STAT_tx_multicast) |       \
 810                               (1ULL << EF10_STAT_tx_broadcast) |       \
 811                               (1ULL << EF10_STAT_rx_bytes) |           \
 812                               (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
 813                               (1ULL << EF10_STAT_rx_good_bytes) |      \
 814                               (1ULL << EF10_STAT_rx_bad_bytes) |       \
 815                               (1ULL << EF10_STAT_rx_packets) |         \
 816                               (1ULL << EF10_STAT_rx_good) |            \
 817                               (1ULL << EF10_STAT_rx_bad) |             \
 818                               (1ULL << EF10_STAT_rx_pause) |           \
 819                               (1ULL << EF10_STAT_rx_control) |         \
 820                               (1ULL << EF10_STAT_rx_unicast) |         \
 821                               (1ULL << EF10_STAT_rx_multicast) |       \
 822                               (1ULL << EF10_STAT_rx_broadcast) |       \
 823                               (1ULL << EF10_STAT_rx_lt64) |            \
 824                               (1ULL << EF10_STAT_rx_64) |              \
 825                               (1ULL << EF10_STAT_rx_65_to_127) |       \
 826                               (1ULL << EF10_STAT_rx_128_to_255) |      \
 827                               (1ULL << EF10_STAT_rx_256_to_511) |      \
 828                               (1ULL << EF10_STAT_rx_512_to_1023) |     \
 829                               (1ULL << EF10_STAT_rx_1024_to_15xx) |    \
 830                               (1ULL << EF10_STAT_rx_15xx_to_jumbo) |   \
 831                               (1ULL << EF10_STAT_rx_gtjumbo) |         \
 832                               (1ULL << EF10_STAT_rx_bad_gtjumbo) |     \
 833                               (1ULL << EF10_STAT_rx_overflow) |        \
 834                               (1ULL << EF10_STAT_rx_nodesc_drops))
 835
 836/* These statistics are only provided by the 10G MAC.  For a 10G/40G
 837 * switchable port we do not expose these because they might not
 838 * include all the packets they should.
 839 */
 840#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) |       \
 841                                 (1ULL << EF10_STAT_tx_lt64) |          \
 842                                 (1ULL << EF10_STAT_tx_64) |            \
 843                                 (1ULL << EF10_STAT_tx_65_to_127) |     \
 844                                 (1ULL << EF10_STAT_tx_128_to_255) |    \
 845                                 (1ULL << EF10_STAT_tx_256_to_511) |    \
 846                                 (1ULL << EF10_STAT_tx_512_to_1023) |   \
 847                                 (1ULL << EF10_STAT_tx_1024_to_15xx) |  \
 848                                 (1ULL << EF10_STAT_tx_15xx_to_jumbo))
 849
 850/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 851 * switchable port we do expose these because the errors will otherwise
 852 * be silent.
 853 */
 854#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |  \
 855                                  (1ULL << EF10_STAT_rx_length_error))
 856
 857/* These statistics are only provided if the firmware supports the
 858 * capability PM_AND_RXDP_COUNTERS.
 859 */
 860#define HUNT_PM_AND_RXDP_STAT_MASK (                                    \
 861        (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |                   \
 862        (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |                 \
 863        (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |                    \
 864        (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |                  \
 865        (1ULL << EF10_STAT_rx_pm_trunc_qbb) |                           \
 866        (1ULL << EF10_STAT_rx_pm_discard_qbb) |                         \
 867        (1ULL << EF10_STAT_rx_pm_discard_mapping) |                     \
 868        (1ULL << EF10_STAT_rx_dp_q_disabled_packets) |                  \
 869        (1ULL << EF10_STAT_rx_dp_di_dropped_packets) |                  \
 870        (1ULL << EF10_STAT_rx_dp_streaming_packets) |                   \
 871        (1ULL << EF10_STAT_rx_dp_hlb_fetch) |                           \
 872        (1ULL << EF10_STAT_rx_dp_hlb_wait))
 873
 874static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 875{
 876        u64 raw_mask = HUNT_COMMON_STAT_MASK;
 877        u32 port_caps = efx_mcdi_phy_get_caps(efx);
 878        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 879
 880        if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
 881                raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
 882        else
 883                raw_mask |= HUNT_10G_ONLY_STAT_MASK;
 884
 885        if (nic_data->datapath_caps &
 886            (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
 887                raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
 888
 889        return raw_mask;
 890}
 891
 892static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
 893{
 894        u64 raw_mask = efx_ef10_raw_stat_mask(efx);
 895
 896#if BITS_PER_LONG == 64
 897        mask[0] = raw_mask;
 898#else
 899        mask[0] = raw_mask & 0xffffffff;
 900        mask[1] = raw_mask >> 32;
 901#endif
 902}
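/* Worked example, not part of the driver logic: on a 64-bit build the
 * whole raw mask fits in mask[0]; on a 32-bit build it is split across
 * two longs.  A raw mask with bits 0 and 35 set ends up as
 *
 *        mask[0] = 0x00000001;
 *        mask[1] = 0x00000008;
 *
 * (bit 35 becomes bit 3 of the second word), which is the layout
 * for_each_set_bit() expects in efx_ef10_update_stats() below.
 */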
 903
 904static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
 905{
 906        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
 907
 908        efx_ef10_get_stat_mask(efx, mask);
 909        return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
 910                                      mask, names);
 911}
 912
 913static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 914{
 915        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 916        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
 917        __le64 generation_start, generation_end;
 918        u64 *stats = nic_data->stats;
 919        __le64 *dma_stats;
 920
 921        efx_ef10_get_stat_mask(efx, mask);
 922
 923        dma_stats = efx->stats_buffer.addr;
 924        nic_data = efx->nic_data;
 925
 926        generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
 927        if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
 928                return 0;
 929        rmb();
 930        efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
 931                             stats, efx->stats_buffer.addr, false);
 932        rmb();
 933        generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
 934        if (generation_end != generation_start)
 935                return -EAGAIN;
 936
 937        /* Update derived statistics */
 938        efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
 939        stats[EF10_STAT_rx_good_bytes] =
 940                stats[EF10_STAT_rx_bytes] -
 941                stats[EF10_STAT_rx_bytes_minus_good_bytes];
 942        efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
 943                             stats[EF10_STAT_rx_bytes_minus_good_bytes]);
 944
 945        return 0;
 946}
 947
 948
 949static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
 950                                    struct rtnl_link_stats64 *core_stats)
 951{
 952        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
 953        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 954        u64 *stats = nic_data->stats;
 955        size_t stats_count = 0, index;
 956        int retry;
 957
 958        efx_ef10_get_stat_mask(efx, mask);
 959
 960        /* If we're unlucky enough to read statistics during the DMA, wait
 961         * up to 10ms for it to finish (typically takes <500us)
 962         */
 963        for (retry = 0; retry < 100; ++retry) {
 964                if (efx_ef10_try_update_nic_stats(efx) == 0)
 965                        break;
 966                udelay(100);
 967        }
 968
 969        if (full_stats) {
 970                for_each_set_bit(index, mask, EF10_STAT_COUNT) {
 971                        if (efx_ef10_stat_desc[index].name) {
 972                                *full_stats++ = stats[index];
 973                                ++stats_count;
 974                        }
 975                }
 976        }
 977
 978        if (core_stats) {
 979                core_stats->rx_packets = stats[EF10_STAT_rx_packets];
 980                core_stats->tx_packets = stats[EF10_STAT_tx_packets];
 981                core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
 982                core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
 983                core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
 984                core_stats->multicast = stats[EF10_STAT_rx_multicast];
 985                core_stats->rx_length_errors =
 986                        stats[EF10_STAT_rx_gtjumbo] +
 987                        stats[EF10_STAT_rx_length_error];
 988                core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
 989                core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
 990                core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
 991                core_stats->rx_errors = (core_stats->rx_length_errors +
 992                                         core_stats->rx_crc_errors +
 993                                         core_stats->rx_frame_errors);
 994        }
 995
 996        return stats_count;
 997}
 998
 999static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
1000{
1001        struct efx_nic *efx = channel->efx;
1002        unsigned int mode, value;
1003        efx_dword_t timer_cmd;
1004
1005        if (channel->irq_moderation) {
1006                mode = 3;
1007                value = channel->irq_moderation - 1;
1008        } else {
1009                mode = 0;
1010                value = 0;
1011        }
1012
1013        if (EFX_EF10_WORKAROUND_35388(efx)) {
1014                EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
1015                                     EFE_DD_EVQ_IND_TIMER_FLAGS,
1016                                     ERF_DD_EVQ_IND_TIMER_MODE, mode,
1017                                     ERF_DD_EVQ_IND_TIMER_VAL, value);
1018                efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
1019                                channel->channel);
1020        } else {
1021                EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
1022                                     ERF_DZ_TC_TIMER_VAL, value);
1023                efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
1024                                channel->channel);
1025        }
1026}
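/* Worked example, not part of the driver logic: with
 * channel->irq_moderation = 60 timer ticks, the register is written
 * with mode = 3 (presumably the interrupt hold-off timer mode) and
 * value = 59, i.e. the hardware waits irq_moderation ticks of
 * efx->timer_quantum_ns between interrupts; with the ~1920 ns quantum
 * from the sysclk example above that is roughly 115 us of moderation.
 */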
1027
1028static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
1029{
1030        wol->supported = 0;
1031        wol->wolopts = 0;
1032        memset(&wol->sopass, 0, sizeof(wol->sopass));
1033}
1034
1035static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
1036{
1037        if (type != 0)
1038                return -EINVAL;
1039        return 0;
1040}
1041
1042static void efx_ef10_mcdi_request(struct efx_nic *efx,
1043                                  const efx_dword_t *hdr, size_t hdr_len,
1044                                  const efx_dword_t *sdu, size_t sdu_len)
1045{
1046        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1047        u8 *pdu = nic_data->mcdi_buf.addr;
1048
1049        memcpy(pdu, hdr, hdr_len);
1050        memcpy(pdu + hdr_len, sdu, sdu_len);
1051        wmb();
1052
1053        /* The hardware provides 'low' and 'high' (doorbell) registers
1054         * for passing the 64-bit address of an MCDI request to
1055         * firmware.  However the dwords are swapped by firmware.  The
1056         * least significant bits of the doorbell are then 0 for all
1057         * MCDI requests due to alignment.
1058         */
1059        _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
1060                    ER_DZ_MC_DB_LWRD);
1061        _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
1062                    ER_DZ_MC_DB_HWRD);
1063}
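/* Worked example, not part of the driver logic: suppose the MCDI buffer
 * sits at DMA address 0x123456000 (a made-up value).  The two writes
 * above are then
 *
 *        _efx_writed(efx, cpu_to_le32(0x00000001), ER_DZ_MC_DB_LWRD);
 *        _efx_writed(efx, cpu_to_le32(0x23456000), ER_DZ_MC_DB_HWRD);
 *
 * i.e. the high dword goes to the "low" register and the low dword to
 * the "high" (doorbell) register, matching the dword swap described in
 * the comment above.  Because the buffer is naturally aligned, the low
 * bits of the doorbell write are always 0 for a real request, which is
 * why efx_ef10_probe() can write the value 1 to ER_DZ_MC_DB_HWRD as an
 * unambiguous "cancel any outstanding request" message.
 */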
1064
1065static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
1066{
1067        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1068        const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
1069
1070        rmb();
1071        return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
1072}
1073
1074static void
1075efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
1076                            size_t offset, size_t outlen)
1077{
1078        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1079        const u8 *pdu = nic_data->mcdi_buf.addr;
1080
1081        memcpy(outbuf, pdu + offset, outlen);
1082}
1083
1084static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
1085{
1086        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1087        int rc;
1088
1089        rc = efx_ef10_get_warm_boot_count(efx);
1090        if (rc < 0) {
1091                /* The firmware is presumably in the process of
1092                 * rebooting.  However, we are supposed to report each
1093                 * reboot just once, so we must only do that once we
1094                 * can read and store the updated warm boot count.
1095                 */
1096                return 0;
1097        }
1098
1099        if (rc == nic_data->warm_boot_count)
1100                return 0;
1101
1102        nic_data->warm_boot_count = rc;
1103
1104        /* All our allocations have been reset */
1105        efx_ef10_reset_mc_allocations(efx);
1106
1107        /* The datapath firmware might have been changed */
1108        nic_data->must_check_datapath_caps = true;
1109
1110        /* MAC statistics have been cleared on the NIC; clear the local
1111         * statistic that we update with efx_update_diff_stat().
1112         */
1113        nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
1114
1115        return -EIO;
1116}
1117
1118/* Handle an MSI interrupt
1119 *
1120 * Handle an MSI hardware interrupt.  This routine schedules event
1121 * queue processing.  No interrupt acknowledgement cycle is necessary.
1122 * Also, we never need to check that the interrupt is for us, since
1123 * MSI interrupts cannot be shared.
1124 */
1125static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
1126{
1127        struct efx_msi_context *context = dev_id;
1128        struct efx_nic *efx = context->efx;
1129
1130        netif_vdbg(efx, intr, efx->net_dev,
1131                   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
1132
1133        if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
1134                /* Note test interrupts */
1135                if (context->index == efx->irq_level)
1136                        efx->last_irq_cpu = raw_smp_processor_id();
1137
1138                /* Schedule processing of the channel */
1139                efx_schedule_channel_irq(efx->channel[context->index]);
1140        }
1141
1142        return IRQ_HANDLED;
1143}
1144
1145static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
1146{
1147        struct efx_nic *efx = dev_id;
1148        bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
1149        struct efx_channel *channel;
1150        efx_dword_t reg;
1151        u32 queues;
1152
1153        /* Read the ISR which also ACKs the interrupts */
1154        efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
1155        queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
1156
1157        if (queues == 0)
1158                return IRQ_NONE;
1159
1160        if (likely(soft_enabled)) {
1161                /* Note test interrupts */
1162                if (queues & (1U << efx->irq_level))
1163                        efx->last_irq_cpu = raw_smp_processor_id();
1164
1165                efx_for_each_channel(channel, efx) {
1166                        if (queues & 1)
1167                                efx_schedule_channel_irq(channel);
1168                        queues >>= 1;
1169                }
1170        }
1171
1172        netif_vdbg(efx, intr, efx->net_dev,
1173                   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1174                   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1175
1176        return IRQ_HANDLED;
1177}
1178
1179static void efx_ef10_irq_test_generate(struct efx_nic *efx)
1180{
1181        MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
1182
1183        BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
1184
1185        MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
1186        (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
1187                            inbuf, sizeof(inbuf), NULL, 0, NULL);
1188}
1189
1190static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
1191{
1192        return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
1193                                    (tx_queue->ptr_mask + 1) *
1194                                    sizeof(efx_qword_t),
1195                                    GFP_KERNEL);
1196}
1197
1198/* This writes to the TX_DESC_WPTR and also pushes data */
1199static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
1200                                         const efx_qword_t *txd)
1201{
1202        unsigned int write_ptr;
1203        efx_oword_t reg;
1204
1205        write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1206        EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
1207        reg.qword[0] = *txd;
1208        efx_writeo_page(tx_queue->efx, &reg,
1209                        ER_DZ_TX_DESC_UPD, tx_queue->queue);
1210}
1211
1212static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
1213{
1214        MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
1215                                                       EFX_BUF_SIZE));
1216        MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
1217        bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
1218        size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
1219        struct efx_channel *channel = tx_queue->channel;
1220        struct efx_nic *efx = tx_queue->efx;
1221        size_t inlen, outlen;
1222        dma_addr_t dma_addr;
1223        efx_qword_t *txd;
1224        int rc;
1225        int i;
1226
1227        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
1228        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
1229        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
1230        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
1231        MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
1232                              INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
1233                              INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
1234        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
1235        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1236
1237        dma_addr = tx_queue->txd.buf.dma_addr;
1238
1239        netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
1240                  tx_queue->queue, entries, (u64)dma_addr);
1241
1242        for (i = 0; i < entries; ++i) {
1243                MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
1244                dma_addr += EFX_BUF_SIZE;
1245        }
1246
1247        inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
1248
1249        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
1250                          outbuf, sizeof(outbuf), &outlen);
1251        if (rc)
1252                goto fail;
1253
1254        /* A previous user of this TX queue might have set us up the
1255         * bomb by writing a descriptor to the TX push collector but
1256         * not the doorbell.  (Each collector belongs to a port, not a
1257         * queue or function, so cannot easily be reset.)  We must
1258         * attempt to push a no-op descriptor in its place.
1259         */
1260        tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
1261        tx_queue->insert_count = 1;
1262        txd = efx_tx_desc(tx_queue, 0);
1263        EFX_POPULATE_QWORD_4(*txd,
1264                             ESF_DZ_TX_DESC_IS_OPT, true,
1265                             ESF_DZ_TX_OPTION_TYPE,
1266                             ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
1267                             ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
1268                             ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
1269        tx_queue->write_count = 1;
1270        wmb();
1271        efx_ef10_push_tx_desc(tx_queue, txd);
1272
1273        return;
1274
1275fail:
1276        netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
1277                    tx_queue->queue);
1278}
1279
1280static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
1281{
1282        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
1283        MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
1284        struct efx_nic *efx = tx_queue->efx;
1285        size_t outlen;
1286        int rc;
1287
1288        MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
1289                       tx_queue->queue);
1290
1291        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
1292                          outbuf, sizeof(outbuf), &outlen);
1293
1294        if (rc && rc != -EALREADY)
1295                goto fail;
1296
1297        return;
1298
1299fail:
1300        efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
1301                               outbuf, outlen, rc);
1302}
1303
1304static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
1305{
1306        efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
1307}
1308
1309/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
1310static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
1311{
1312        unsigned int write_ptr;
1313        efx_dword_t reg;
1314
1315        write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1316        EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
1317        efx_writed_page(tx_queue->efx, &reg,
1318                        ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
1319}
1320
1321static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
1322{
1323        unsigned int old_write_count = tx_queue->write_count;
1324        struct efx_tx_buffer *buffer;
1325        unsigned int write_ptr;
1326        efx_qword_t *txd;
1327
1328        BUG_ON(tx_queue->write_count == tx_queue->insert_count);
1329
1330        do {
1331                write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1332                buffer = &tx_queue->buffer[write_ptr];
1333                txd = efx_tx_desc(tx_queue, write_ptr);
1334                ++tx_queue->write_count;
1335
1336                /* Create TX descriptor ring entry */
1337                if (buffer->flags & EFX_TX_BUF_OPTION) {
1338                        *txd = buffer->option;
1339                } else {
1340                        BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
1341                        EFX_POPULATE_QWORD_3(
1342                                *txd,
1343                                ESF_DZ_TX_KER_CONT,
1344                                buffer->flags & EFX_TX_BUF_CONT,
1345                                ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
1346                                ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
1347                }
1348        } while (tx_queue->write_count != tx_queue->insert_count);
1349
1350        wmb(); /* Ensure descriptors are written before they are fetched */
1351
1352        if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
1353                txd = efx_tx_desc(tx_queue,
1354                                  old_write_count & tx_queue->ptr_mask);
1355                efx_ef10_push_tx_desc(tx_queue, txd);
1356                ++tx_queue->pushes;
1357        } else {
1358                efx_ef10_notify_tx_desc(tx_queue);
1359        }
1360}
1361
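/* Allocate an exclusive RSS context spanning up to EFX_MAX_CHANNELS queues
 * and return its firmware-assigned ID in *context.
 */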
1362static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
1363{
1364        MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
1365        MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
1366        size_t outlen;
1367        int rc;
1368
1369        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
1370                       EVB_PORT_ID_ASSIGNED);
1371        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
1372                       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
1373        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
1374                       EFX_MAX_CHANNELS);
1375
1376        rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
1377                outbuf, sizeof(outbuf), &outlen);
1378        if (rc != 0)
1379                return rc;
1380
1381        if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
1382                return -EIO;
1383
1384        *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
1385
1386        return 0;
1387}
1388
1389static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
1390{
1391        MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
1392        int rc;
1393
1394        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
1395                       context);
1396
1397        rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
1398                            NULL, 0, NULL);
1399        WARN_ON(rc != 0);
1400}
1401
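/* Program the indirection table and Toeplitz hash key of an RSS context from
 * the copies cached in struct efx_nic.
 */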
1402static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
1403{
1404        MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
1405        MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
1406        int i, rc;
1407
1408        MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
1409                       context);
1410        BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1411                     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
1412
1413        for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
1414                MCDI_PTR(tablebuf,
1415                         RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
1416                                (u8) efx->rx_indir_table[i];
1417
1418        rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
1419                          sizeof(tablebuf), NULL, 0, NULL);
1420        if (rc != 0)
1421                return rc;
1422
1423        MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
1424                       context);
1425        BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
1426                     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
1427        for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
1428                MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
1429                        efx->rx_hash_key[i];
1430
1431        return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
1432                            sizeof(keybuf), NULL, 0, NULL);
1433}
1434
1435static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
1436{
1437        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1438
1439        if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
1440                efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
1441        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1442}
1443
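/* Push the current RSS configuration to the NIC, allocating an RSS context
 * first if we do not already have one.
 */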
1444static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
1445{
1446        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1447        int rc;
1448
1449        netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
1450
1451        if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
1452                rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
1453                if (rc != 0)
1454                        goto fail;
1455        }
1456
1457        rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
1458        if (rc != 0)
1459                goto fail;
1460
1461        return;
1462
1463fail:
1464        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1465}
1466
1467static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
1468{
1469        return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
1470                                    (rx_queue->ptr_mask + 1) *
1471                                    sizeof(efx_qword_t),
1472                                    GFP_KERNEL);
1473}
1474
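/* Set up an RX queue with MC_CMD_INIT_RXQ, passing the DMA addresses of the
 * descriptor ring buffers and enabling the RX prefix and timestamping.
 */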
1475static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
1476{
1477        MCDI_DECLARE_BUF(inbuf,
1478                         MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
1479                                                EFX_BUF_SIZE));
1480        MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
1481        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
1482        size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
1483        struct efx_nic *efx = rx_queue->efx;
1484        size_t inlen, outlen;
1485        dma_addr_t dma_addr;
1486        int rc;
1487        int i;
1488
1489        rx_queue->scatter_n = 0;
1490        rx_queue->scatter_len = 0;
1491
1492        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
1493        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
1494        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
1495        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
1496                       efx_rx_queue_index(rx_queue));
1497        MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
1498                              INIT_RXQ_IN_FLAG_PREFIX, 1,
1499                              INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
1500        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
1501        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1502
1503        dma_addr = rx_queue->rxd.buf.dma_addr;
1504
1505        netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
1506                  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
1507
1508        for (i = 0; i < entries; ++i) {
1509                MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
1510                dma_addr += EFX_BUF_SIZE;
1511        }
1512
1513        inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
1514
1515        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
1516                          outbuf, sizeof(outbuf), &outlen);
1517        if (rc)
1518                netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
1519                            efx_rx_queue_index(rx_queue));
1520}
1521
1522static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
1523{
1524        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
1525        MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
1526        struct efx_nic *efx = rx_queue->efx;
1527        size_t outlen;
1528        int rc;
1529
1530        MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
1531                       efx_rx_queue_index(rx_queue));
1532
1533        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
1534                          outbuf, sizeof(outbuf), &outlen);
1535
1536        if (rc && rc != -EALREADY)
1537                goto fail;
1538
1539        return;
1540
1541fail:
1542        efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
1543                               outbuf, outlen, rc);
1544}
1545
1546static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
1547{
1548        efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
1549}
1550
1551/* This creates an entry in the RX descriptor queue */
1552static inline void
1553efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
1554{
1555        struct efx_rx_buffer *rx_buf;
1556        efx_qword_t *rxd;
1557
1558        rxd = efx_rx_desc(rx_queue, index);
1559        rx_buf = efx_rx_buffer(rx_queue, index);
1560        EFX_POPULATE_QWORD_2(*rxd,
1561                             ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
1562                             ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
1563}
1564
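/* Build descriptors for newly added RX buffers and advance the hardware
 * write pointer.  The pointer is only moved in multiples of 8, as required
 * by the firmware.
 */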
1565static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
1566{
1567        struct efx_nic *efx = rx_queue->efx;
1568        unsigned int write_count;
1569        efx_dword_t reg;
1570
1571        /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
1572        write_count = rx_queue->added_count & ~7;
1573        if (rx_queue->notified_count == write_count)
1574                return;
1575
1576        do
1577                efx_ef10_build_rx_desc(
1578                        rx_queue,
1579                        rx_queue->notified_count & rx_queue->ptr_mask);
1580        while (++rx_queue->notified_count != write_count);
1581
1582        wmb();
1583        EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
1584                             write_count & rx_queue->ptr_mask);
1585        efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
1586                        efx_rx_queue_index(rx_queue));
1587}
1588
1589static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
1590
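/* Request a deferred RX refill by injecting a driver-generated REFILL event
 * into the channel's event queue via an asynchronous MC_CMD_DRIVER_EVENT.
 */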
1591static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
1592{
1593        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
1594        MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
1595        efx_qword_t event;
1596
1597        EFX_POPULATE_QWORD_2(event,
1598                             ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
1599                             ESF_DZ_EV_DATA, EFX_EF10_REFILL);
1600
1601        MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
1602
1603        /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
1604         * already swapped the data to little-endian order.
1605         */
1606        memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
1607               sizeof(efx_qword_t));
1608
1609        efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
1610                           inbuf, sizeof(inbuf), 0,
1611                           efx_ef10_rx_defer_refill_complete, 0);
1612}
1613
1614static void
1615efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
1616                                  int rc, efx_dword_t *outbuf,
1617                                  size_t outlen_actual)
1618{
1619        /* nothing to do */
1620}
1621
1622static int efx_ef10_ev_probe(struct efx_channel *channel)
1623{
1624        return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
1625                                    (channel->eventq_mask + 1) *
1626                                    sizeof(efx_qword_t),
1627                                    GFP_KERNEL);
1628}
1629
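/* Create an event queue with MC_CMD_INIT_EVQ.  The queue memory is filled
 * with all-ones (empty events) first; cut-through is enabled only when the
 * firmware does not support RX event merging.
 */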
1630static int efx_ef10_ev_init(struct efx_channel *channel)
1631{
1632        MCDI_DECLARE_BUF(inbuf,
1633                         MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
1634                                                EFX_BUF_SIZE));
1635        MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
1636        size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
1637        struct efx_nic *efx = channel->efx;
1638        struct efx_ef10_nic_data *nic_data;
1639        bool supports_rx_merge;
1640        size_t inlen, outlen;
1641        dma_addr_t dma_addr;
1642        int rc;
1643        int i;
1644
1645        nic_data = efx->nic_data;
1646        supports_rx_merge =
1647                !!(nic_data->datapath_caps &
1648                   1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
1649
1650        /* Fill event queue with all ones (i.e. empty events) */
1651        memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1652
1653        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
1654        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
1655        /* INIT_EVQ expects index in vector table, not absolute */
1656        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
1657        MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
1658                              INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
1659                              INIT_EVQ_IN_FLAG_RX_MERGE, 1,
1660                              INIT_EVQ_IN_FLAG_TX_MERGE, 1,
1661                              INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
1662        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
1663                       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
1664        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
1665        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
1666        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
1667                       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
1668        MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
1669
1670        dma_addr = channel->eventq.buf.dma_addr;
1671        for (i = 0; i < entries; ++i) {
1672                MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
1673                dma_addr += EFX_BUF_SIZE;
1674        }
1675
1676        inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
1677
1678        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
1679                          outbuf, sizeof(outbuf), &outlen);
1680        /* The IRQ value in the INIT_EVQ response is ignored */
1681        return rc;
1682}
1683
1684static void efx_ef10_ev_fini(struct efx_channel *channel)
1685{
1686        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
1687        MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
1688        struct efx_nic *efx = channel->efx;
1689        size_t outlen;
1690        int rc;
1691
1692        MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
1693
1694        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
1695                          outbuf, sizeof(outbuf), &outlen);
1696
1697        if (rc && rc != -EALREADY)
1698                goto fail;
1699
1700        return;
1701
1702fail:
1703        efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
1704                               outbuf, outlen, rc);
1705}
1706
1707static void efx_ef10_ev_remove(struct efx_channel *channel)
1708{
1709        efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
1710}
1711
1712static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
1713                                           unsigned int rx_queue_label)
1714{
1715        struct efx_nic *efx = rx_queue->efx;
1716
1717        netif_info(efx, hw, efx->net_dev,
1718                   "rx event arrived on queue %d labeled as queue %u\n",
1719                   efx_rx_queue_index(rx_queue), rx_queue_label);
1720
1721        efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1722}
1723
1724static void
1725efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
1726                             unsigned int actual, unsigned int expected)
1727{
1728        unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
1729        struct efx_nic *efx = rx_queue->efx;
1730
1731        netif_info(efx, hw, efx->net_dev,
1732                   "dropped %d events (index=%d expected=%d)\n",
1733                   dropped, actual, expected);
1734
1735        efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1736}
1737
1738/* A partially received packet was aborted; discard its buffers. */
1739static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
1740{
1741        unsigned int rx_desc_ptr;
1742
1743        netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
1744                  "scattered RX aborted (dropping %u buffers)\n",
1745                  rx_queue->scatter_n);
1746
1747        rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
1748
1749        efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
1750                      0, EFX_RX_PKT_DISCARD);
1751
1752        rx_queue->removed_count += rx_queue->scatter_n;
1753        rx_queue->scatter_n = 0;
1754        rx_queue->scatter_len = 0;
1755        ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
1756}
1757
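/* Handle one RX completion event.  An event may complete a fragment of a
 * scattered packet, a whole packet, or (with RX event merging) several
 * packets at once.  Returns the number of packets passed to the RX path.
 */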
1758static int efx_ef10_handle_rx_event(struct efx_channel *channel,
1759                                    const efx_qword_t *event)
1760{
1761        unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
1762        unsigned int n_descs, n_packets, i;
1763        struct efx_nic *efx = channel->efx;
1764        struct efx_rx_queue *rx_queue;
1765        bool rx_cont;
1766        u16 flags = 0;
1767
1768        if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1769                return 0;
1770
1771        /* Basic packet information */
1772        rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
1773        next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
1774        rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
1775        rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
1776        rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
1777
1778        if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
1779                netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
1780                            EFX_QWORD_FMT "\n",
1781                            EFX_QWORD_VAL(*event));
1782
1783        rx_queue = efx_channel_get_rx_queue(channel);
1784
1785        if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
1786                efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
1787
1788        n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
1789                   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
1790
1791        if (n_descs != rx_queue->scatter_n + 1) {
1792                struct efx_ef10_nic_data *nic_data = efx->nic_data;
1793
1794                /* detect rx abort */
1795                if (unlikely(n_descs == rx_queue->scatter_n)) {
1796                        if (rx_queue->scatter_n == 0 || rx_bytes != 0)
1797                                netdev_WARN(efx->net_dev,
1798                                            "invalid RX abort: scatter_n=%u event="
1799                                            EFX_QWORD_FMT "\n",
1800                                            rx_queue->scatter_n,
1801                                            EFX_QWORD_VAL(*event));
1802                        efx_ef10_handle_rx_abort(rx_queue);
1803                        return 0;
1804                }
1805
1806                /* Check that RX completion merging is valid, i.e.
1807                 * the current firmware supports it and this is a
1808                 * non-scattered packet.
1809                 */
1810                if (!(nic_data->datapath_caps &
1811                      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
1812                    rx_queue->scatter_n != 0 || rx_cont) {
1813                        efx_ef10_handle_rx_bad_lbits(
1814                                rx_queue, next_ptr_lbits,
1815                                (rx_queue->removed_count +
1816                                 rx_queue->scatter_n + 1) &
1817                                ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
1818                        return 0;
1819                }
1820
1821                /* Merged completion for multiple non-scattered packets */
1822                rx_queue->scatter_n = 1;
1823                rx_queue->scatter_len = 0;
1824                n_packets = n_descs;
1825                ++channel->n_rx_merge_events;
1826                channel->n_rx_merge_packets += n_packets;
1827                flags |= EFX_RX_PKT_PREFIX_LEN;
1828        } else {
1829                ++rx_queue->scatter_n;
1830                rx_queue->scatter_len += rx_bytes;
1831                if (rx_cont)
1832                        return 0;
1833                n_packets = 1;
1834        }
1835
1836        if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
1837                flags |= EFX_RX_PKT_DISCARD;
1838
1839        if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
1840                channel->n_rx_ip_hdr_chksum_err += n_packets;
1841        } else if (unlikely(EFX_QWORD_FIELD(*event,
1842                                            ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
1843                channel->n_rx_tcp_udp_chksum_err += n_packets;
1844        } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
1845                   rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
1846                flags |= EFX_RX_PKT_CSUMMED;
1847        }
1848
1849        if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
1850                flags |= EFX_RX_PKT_TCP;
1851
1852        channel->irq_mod_score += 2 * n_packets;
1853
1854        /* Handle received packet(s) */
1855        for (i = 0; i < n_packets; i++) {
1856                efx_rx_packet(rx_queue,
1857                              rx_queue->removed_count & rx_queue->ptr_mask,
1858                              rx_queue->scatter_n, rx_queue->scatter_len,
1859                              flags);
1860                rx_queue->removed_count += rx_queue->scatter_n;
1861        }
1862
1863        rx_queue->scatter_n = 0;
1864        rx_queue->scatter_len = 0;
1865
1866        return n_packets;
1867}
1868
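/* Handle a TX completion event and return the number of descriptors it
 * completed.
 */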
1869static int
1870efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
1871{
1872        struct efx_nic *efx = channel->efx;
1873        struct efx_tx_queue *tx_queue;
1874        unsigned int tx_ev_desc_ptr;
1875        unsigned int tx_ev_q_label;
1876        int tx_descs = 0;
1877
1878        if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1879                return 0;
1880
1881        if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
1882                return 0;
1883
1884        /* Transmit completion */
1885        tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
1886        tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
1887        tx_queue = efx_channel_get_tx_queue(channel,
1888                                            tx_ev_q_label % EFX_TXQ_TYPES);
1889        tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
1890                    tx_queue->ptr_mask);
1891        efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
1892
1893        return tx_descs;
1894}
1895
1896static void
1897efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1898{
1899        struct efx_nic *efx = channel->efx;
1900        int subcode;
1901
1902        subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
1903
1904        switch (subcode) {
1905        case ESE_DZ_DRV_TIMER_EV:
1906        case ESE_DZ_DRV_WAKE_UP_EV:
1907                break;
1908        case ESE_DZ_DRV_START_UP_EV:
1909                /* Event queue initialisation complete; nothing to do. */
1910                break;
1911        default:
1912                netif_err(efx, hw, efx->net_dev,
1913                          "channel %d unknown driver event type %d"
1914                          " (data " EFX_QWORD_FMT ")\n",
1915                          channel->channel, subcode,
1916                          EFX_QWORD_VAL(*event));
1917
1918        }
1919}
1920
1921static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
1922                                                   efx_qword_t *event)
1923{
1924        struct efx_nic *efx = channel->efx;
1925        u32 subcode;
1926
1927        subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
1928
1929        switch (subcode) {
1930        case EFX_EF10_TEST:
1931                channel->event_test_cpu = raw_smp_processor_id();
1932                break;
1933        case EFX_EF10_REFILL:
1934                /* The queue must be empty, so we won't receive any RX
1935                 * events and efx_process_channel() won't refill the
1936                 * queue.  Refill it here.
1937                 */
1938                efx_fast_push_rx_descriptors(&channel->rx_queue, true);
1939                break;
1940        default:
1941                netif_err(efx, hw, efx->net_dev,
1942                          "channel %d unknown driver event type %u"
1943                          " (data " EFX_QWORD_FMT ")\n",
1944                          channel->channel, (unsigned) subcode,
1945                          EFX_QWORD_VAL(*event));
1946        }
1947}
1948
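/* Service the channel's event queue: process events until the queue is empty
 * or the quota is exhausted, and return the amount of quota spent.
 */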
1949static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
1950{
1951        struct efx_nic *efx = channel->efx;
1952        efx_qword_t event, *p_event;
1953        unsigned int read_ptr;
1954        int ev_code;
1955        int tx_descs = 0;
1956        int spent = 0;
1957
1958        read_ptr = channel->eventq_read_ptr;
1959
1960        for (;;) {
1961                p_event = efx_event(channel, read_ptr);
1962                event = *p_event;
1963
1964                if (!efx_event_present(&event))
1965                        break;
1966
1967                EFX_SET_QWORD(*p_event);
1968
1969                ++read_ptr;
1970
1971                ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
1972
1973                netif_vdbg(efx, drv, efx->net_dev,
1974                           "processing event on %d " EFX_QWORD_FMT "\n",
1975                           channel->channel, EFX_QWORD_VAL(event));
1976
1977                switch (ev_code) {
1978                case ESE_DZ_EV_CODE_MCDI_EV:
1979                        efx_mcdi_process_event(channel, &event);
1980                        break;
1981                case ESE_DZ_EV_CODE_RX_EV:
1982                        spent += efx_ef10_handle_rx_event(channel, &event);
1983                        if (spent >= quota) {
1984                                /* XXX can we split a merged event to
1985                                 * avoid going over-quota?
1986                                 */
1987                                spent = quota;
1988                                goto out;
1989                        }
1990                        break;
1991                case ESE_DZ_EV_CODE_TX_EV:
1992                        tx_descs += efx_ef10_handle_tx_event(channel, &event);
1993                        if (tx_descs > efx->txq_entries) {
1994                                spent = quota;
1995                                goto out;
1996                        } else if (++spent == quota) {
1997                                goto out;
1998                        }
1999                        break;
2000                case ESE_DZ_EV_CODE_DRIVER_EV:
2001                        efx_ef10_handle_driver_event(channel, &event);
2002                        if (++spent == quota)
2003                                goto out;
2004                        break;
2005                case EFX_EF10_DRVGEN_EV:
2006                        efx_ef10_handle_driver_generated_event(channel, &event);
2007                        break;
2008                default:
2009                        netif_err(efx, hw, efx->net_dev,
2010                                  "channel %d unknown event type %d"
2011                                  " (data " EFX_QWORD_FMT ")\n",
2012                                  channel->channel, ev_code,
2013                                  EFX_QWORD_VAL(event));
2014                }
2015        }
2016
2017out:
2018        channel->eventq_read_ptr = read_ptr;
2019        return spent;
2020}
2021
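/* Acknowledge processed events by writing the event queue read pointer back
 * to the NIC.  With workaround 35388 the pointer must be written as two
 * halves through the indirect ER_DD_EVQ_INDIRECT register.
 */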
2022static void efx_ef10_ev_read_ack(struct efx_channel *channel)
2023{
2024        struct efx_nic *efx = channel->efx;
2025        efx_dword_t rptr;
2026
2027        if (EFX_EF10_WORKAROUND_35388(efx)) {
2028                BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
2029                             (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
2030                BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
2031                             (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
2032
2033                EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
2034                                     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
2035                                     ERF_DD_EVQ_IND_RPTR,
2036                                     (channel->eventq_read_ptr &
2037                                      channel->eventq_mask) >>
2038                                     ERF_DD_EVQ_IND_RPTR_WIDTH);
2039                efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
2040                                channel->channel);
2041                EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
2042                                     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
2043                                     ERF_DD_EVQ_IND_RPTR,
2044                                     channel->eventq_read_ptr &
2045                                     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
2046                efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
2047                                channel->channel);
2048        } else {
2049                EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
2050                                     channel->eventq_read_ptr &
2051                                     channel->eventq_mask);
2052                efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
2053        }
2054}
2055
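/* Inject a driver-generated TEST event into the channel's event queue; the
 * handler records the CPU it was seen on for the event self-test.
 */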
2056static void efx_ef10_ev_test_generate(struct efx_channel *channel)
2057{
2058        MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2059        struct efx_nic *efx = channel->efx;
2060        efx_qword_t event;
2061        int rc;
2062
2063        EFX_POPULATE_QWORD_2(event,
2064                             ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2065                             ESF_DZ_EV_DATA, EFX_EF10_TEST);
2066
2067        MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2068
2069        /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2070         * already swapped the data to little-endian order.
2071         */
2072        memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2073               sizeof(efx_qword_t));
2074
2075        rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
2076                          NULL, 0, NULL);
2077        if (rc != 0)
2078                goto fail;
2079
2080        return;
2081
2082fail:
2083        WARN_ON(true);
2084        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2085}
2086
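/* Called for each queue flush-completion notification from the firmware;
 * wakes efx_ef10_fini_dmaq() once the last active queue has drained.
 */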
2087void efx_ef10_handle_drain_event(struct efx_nic *efx)
2088{
2089        if (atomic_dec_and_test(&efx->active_queues))
2090                wake_up(&efx->flush_wq);
2091
2092        WARN_ON(atomic_read(&efx->active_queues) < 0);
2093}
2094
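/* Tear down all TX and RX queues and wait for the resulting drain events,
 * which decrement efx->active_queues.  If the MC has just rebooted the
 * queues are already gone; during EEH recovery we must not touch the NIC.
 */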
2095static int efx_ef10_fini_dmaq(struct efx_nic *efx)
2096{
2097        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2098        struct efx_channel *channel;
2099        struct efx_tx_queue *tx_queue;
2100        struct efx_rx_queue *rx_queue;
2101        int pending;
2102
2103        /* If the MC has just rebooted, the TX/RX queues will have already been
2104         * torn down, but efx->active_queues needs to be set to zero.
2105         */
2106        if (nic_data->must_realloc_vis) {
2107                atomic_set(&efx->active_queues, 0);
2108                return 0;
2109        }
2110
2111        /* Do not attempt to write to the NIC during EEH recovery */
2112        if (efx->state != STATE_RECOVERY) {
2113                efx_for_each_channel(channel, efx) {
2114                        efx_for_each_channel_rx_queue(rx_queue, channel)
2115                                efx_ef10_rx_fini(rx_queue);
2116                        efx_for_each_channel_tx_queue(tx_queue, channel)
2117                                efx_ef10_tx_fini(tx_queue);
2118                }
2119
2120                wait_event_timeout(efx->flush_wq,
2121                                   atomic_read(&efx->active_queues) == 0,
2122                                   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
2123                pending = atomic_read(&efx->active_queues);
2124                if (pending) {
2125                        netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
2126                                  pending);
2127                        return -ETIMEDOUT;
2128                }
2129        }
2130
2131        return 0;
2132}
2133
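/* Two filter specs are considered equal if they have the same match flags,
 * the same RX/TX direction flags and identical match values (everything
 * from outer_vid onwards).
 */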
2134static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
2135                                  const struct efx_filter_spec *right)
2136{
2137        if ((left->match_flags ^ right->match_flags) |
2138            ((left->flags ^ right->flags) &
2139             (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
2140                return false;
2141
2142        return memcmp(&left->outer_vid, &right->outer_vid,
2143                      sizeof(struct efx_filter_spec) -
2144                      offsetof(struct efx_filter_spec, outer_vid)) == 0;
2145}
2146
2147static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
2148{
2149        BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
2150        return jhash2((const u32 *)&spec->outer_vid,
2151                      (sizeof(struct efx_filter_spec) -
2152                       offsetof(struct efx_filter_spec, outer_vid)) / 4,
2153                      0);
2154        /* XXX should we randomise the initval? */
2155}
2156
2157/* Decide whether a filter should be exclusive or else should allow
2158 * delivery to additional recipients.  Currently we decide that
2159 * filters for specific local unicast MAC and IP addresses are
2160 * exclusive.
2161 */
2162static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
2163{
2164        if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
2165            !is_multicast_ether_addr(spec->loc_mac))
2166                return true;
2167
2168        if ((spec->match_flags &
2169             (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
2170            (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
2171                if (spec->ether_type == htons(ETH_P_IP) &&
2172                    !ipv4_is_multicast(spec->loc_host[0]))
2173                        return true;
2174                if (spec->ether_type == htons(ETH_P_IPV6) &&
2175                    ((const u8 *)spec->loc_host)[0] != 0xff)
2176                        return true;
2177        }
2178
2179        return false;
2180}
2181
2182static struct efx_filter_spec *
2183efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
2184                           unsigned int filter_idx)
2185{
2186        return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
2187                                          ~EFX_EF10_FILTER_FLAGS);
2188}
2189
2190static unsigned int
2191efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
2192                           unsigned int filter_idx)
2193{
2194        return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
2195}
2196
2197static void
2198efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
2199                          unsigned int filter_idx,
2200                          const struct efx_filter_spec *spec,
2201                          unsigned int flags)
2202{
2203        table->entry[filter_idx].spec = (unsigned long)spec | flags;
2204}
2205
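/* Build an MC_CMD_FILTER_OP request for a filter spec.  A replacement reuses
 * the existing firmware handle; otherwise the match flags and values are
 * converted to their MCDI representation.
 */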
2206static void efx_ef10_filter_push_prep(struct efx_nic *efx,
2207                                      const struct efx_filter_spec *spec,
2208                                      efx_dword_t *inbuf, u64 handle,
2209                                      bool replacing)
2210{
2211        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2212
2213        memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
2214
2215        if (replacing) {
2216                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2217                               MC_CMD_FILTER_OP_IN_OP_REPLACE);
2218                MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
2219        } else {
2220                u32 match_fields = 0;
2221
2222                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2223                               efx_ef10_filter_is_exclusive(spec) ?
2224                               MC_CMD_FILTER_OP_IN_OP_INSERT :
2225                               MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
2226
2227                /* Convert match flags and values.  Unlike almost
2228                 * everything else in MCDI, these fields are in
2229                 * network byte order.
2230                 */
2231                if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
2232                        match_fields |=
2233                                is_multicast_ether_addr(spec->loc_mac) ?
2234                                1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
2235                                1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
2236#define COPY_FIELD(gen_flag, gen_field, mcdi_field)                          \
2237                if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
2238                        match_fields |=                                      \
2239                                1 << MC_CMD_FILTER_OP_IN_MATCH_ ##           \
2240                                mcdi_field ## _LBN;                          \
2241                        BUILD_BUG_ON(                                        \
2242                                MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
2243                                sizeof(spec->gen_field));                    \
2244                        memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
2245                               &spec->gen_field, sizeof(spec->gen_field));   \
2246                }
2247                COPY_FIELD(REM_HOST, rem_host, SRC_IP);
2248                COPY_FIELD(LOC_HOST, loc_host, DST_IP);
2249                COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
2250                COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
2251                COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
2252                COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
2253                COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
2254                COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
2255                COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
2256                COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
2257#undef COPY_FIELD
2258                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
2259                               match_fields);
2260        }
2261
2262        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
2263        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
2264                       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
2265                       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
2266                       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
2267        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
2268                       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
2269        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
2270                       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
2271                       0 : spec->dmaq_id);
2272        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
2273                       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
2274                       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
2275                       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
2276        if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
2277                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
2278                               spec->rss_context !=
2279                               EFX_FILTER_RSS_CONTEXT_DEFAULT ?
2280                               spec->rss_context : nic_data->rx_rss_context);
2281}
2282
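/* Push a filter to the firmware and record the handle it returns.  -ENOSPC
 * is converted to -EBUSY to match efx_farch_filter_insert().
 */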
2283static int efx_ef10_filter_push(struct efx_nic *efx,
2284                                const struct efx_filter_spec *spec,
2285                                u64 *handle, bool replacing)
2286{
2287        MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2288        MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
2289        int rc;
2290
2291        efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
2292        rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2293                          outbuf, sizeof(outbuf), NULL);
2294        if (rc == 0)
2295                *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
2296        if (rc == -ENOSPC)
2297                rc = -EBUSY; /* to match efx_farch_filter_insert() */
2298        return rc;
2299}
2300
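/* Map a set of match flags to its priority index within the table of
 * RX match types supported by the firmware, or return -EPROTONOSUPPORT.
 */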
2301static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
2302                                        enum efx_filter_match_flags match_flags)
2303{
2304        unsigned int match_pri;
2305
2306        for (match_pri = 0;
2307             match_pri < table->rx_match_count;
2308             match_pri++)
2309                if (table->rx_match_flags[match_pri] == match_flags)
2310                        return match_pri;
2311
2312        return -EPROTONOSUPPORT;
2313}
2314
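/* Insert an RX filter, replacing lower-priority multicast recipients with
 * the same match tuple where appropriate.  On success returns the filter ID,
 * encoded as match_pri * HUNT_FILTER_TBL_ROWS + table index.
 */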
2315static s32 efx_ef10_filter_insert(struct efx_nic *efx,
2316                                  struct efx_filter_spec *spec,
2317                                  bool replace_equal)
2318{
2319        struct efx_ef10_filter_table *table = efx->filter_state;
2320        DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
2321        struct efx_filter_spec *saved_spec;
2322        unsigned int match_pri, hash;
2323        unsigned int priv_flags;
2324        bool replacing = false;
2325        int ins_index = -1;
2326        DEFINE_WAIT(wait);
2327        bool is_mc_recip;
2328        s32 rc;
2329
2330        /* For now, only support RX filters */
2331        if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
2332            EFX_FILTER_FLAG_RX)
2333                return -EINVAL;
2334
2335        rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
2336        if (rc < 0)
2337                return rc;
2338        match_pri = rc;
2339
2340        hash = efx_ef10_filter_hash(spec);
2341        is_mc_recip = efx_filter_is_mc_recipient(spec);
2342        if (is_mc_recip)
2343                bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
2344
2345        /* Find any existing filters with the same match tuple or
2346         * else a free slot to insert at.  If any of them are busy,
2347         * we have to wait and retry.
2348         */
2349        for (;;) {
2350                unsigned int depth = 1;
2351                unsigned int i;
2352
2353                spin_lock_bh(&efx->filter_lock);
2354
2355                for (;;) {
2356                        i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2357                        saved_spec = efx_ef10_filter_entry_spec(table, i);
2358
2359                        if (!saved_spec) {
2360                                if (ins_index < 0)
2361                                        ins_index = i;
2362                        } else if (efx_ef10_filter_equal(spec, saved_spec)) {
2363                                if (table->entry[i].spec &
2364                                    EFX_EF10_FILTER_FLAG_BUSY)
2365                                        break;
2366                                if (spec->priority < saved_spec->priority &&
2367                                    spec->priority != EFX_FILTER_PRI_AUTO) {
2368                                        rc = -EPERM;
2369                                        goto out_unlock;
2370                                }
2371                                if (!is_mc_recip) {
2372                                        /* This is the only one */
2373                                        if (spec->priority ==
2374                                            saved_spec->priority &&
2375                                            !replace_equal) {
2376                                                rc = -EEXIST;
2377                                                goto out_unlock;
2378                                        }
2379                                        ins_index = i;
2380                                        goto found;
2381                                } else if (spec->priority >
2382                                           saved_spec->priority ||
2383                                           (spec->priority ==
2384                                            saved_spec->priority &&
2385                                            replace_equal)) {
2386                                        if (ins_index < 0)
2387                                                ins_index = i;
2388                                        else
2389                                                __set_bit(depth, mc_rem_map);
2390                                }
2391                        }
2392
2393                        /* Once we reach the maximum search depth, use
2394                         * the first suitable slot or return -EBUSY if
2395                         * there was none
2396                         */
2397                        if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2398                                if (ins_index < 0) {
2399                                        rc = -EBUSY;
2400                                        goto out_unlock;
2401                                }
2402                                goto found;
2403                        }
2404
2405                        ++depth;
2406                }
2407
2408                prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2409                spin_unlock_bh(&efx->filter_lock);
2410                schedule();
2411        }
2412
2413found:
2414        /* Create a software table entry if necessary, and mark it
2415         * busy.  We might yet fail to insert, but any attempt to
2416         * insert a conflicting filter while we're waiting for the
2417         * firmware must find the busy entry.
2418         */
2419        saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2420        if (saved_spec) {
2421                if (spec->priority == EFX_FILTER_PRI_AUTO &&
2422                    saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
2423                        /* Just make sure it won't be removed */
2424                        if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
2425                                saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
2426                        table->entry[ins_index].spec &=
2427                                ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
2428                        rc = ins_index;
2429                        goto out_unlock;
2430                }
2431                replacing = true;
2432                priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
2433        } else {
2434                saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2435                if (!saved_spec) {
2436                        rc = -ENOMEM;
2437                        goto out_unlock;
2438                }
2439                *saved_spec = *spec;
2440                priv_flags = 0;
2441        }
2442        efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2443                                  priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
2444
2445        /* Mark lower-priority multicast recipients busy prior to removal */
2446        if (is_mc_recip) {
2447                unsigned int depth, i;
2448
2449                for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2450                        i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2451                        if (test_bit(depth, mc_rem_map))
2452                                table->entry[i].spec |=
2453                                        EFX_EF10_FILTER_FLAG_BUSY;
2454                }
2455        }
2456
2457        spin_unlock_bh(&efx->filter_lock);
2458
2459        rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
2460                                  replacing);
2461
2462        /* Finalise the software table entry */
2463        spin_lock_bh(&efx->filter_lock);
2464        if (rc == 0) {
2465                if (replacing) {
2466                        /* Update the fields that may differ */
2467                        if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
2468                                saved_spec->flags |=
2469                                        EFX_FILTER_FLAG_RX_OVER_AUTO;
2470                        saved_spec->priority = spec->priority;
2471                        saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
2472                        saved_spec->flags |= spec->flags;
2473                        saved_spec->rss_context = spec->rss_context;
2474                        saved_spec->dmaq_id = spec->dmaq_id;
2475                }
2476        } else if (!replacing) {
2477                kfree(saved_spec);
2478                saved_spec = NULL;
2479        }
2480        efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
2481
2482        /* Remove and finalise entries for lower-priority multicast
2483         * recipients
2484         */
2485        if (is_mc_recip) {
2486                MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2487                unsigned int depth, i;
2488
2489                memset(inbuf, 0, sizeof(inbuf));
2490
2491                for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2492                        if (!test_bit(depth, mc_rem_map))
2493                                continue;
2494
2495                        i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2496                        saved_spec = efx_ef10_filter_entry_spec(table, i);
2497                        priv_flags = efx_ef10_filter_entry_flags(table, i);
2498
2499                        if (rc == 0) {
2500                                spin_unlock_bh(&efx->filter_lock);
2501                                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2502                                               MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2503                                MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2504                                               table->entry[i].handle);
2505                                rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2506                                                  inbuf, sizeof(inbuf),
2507                                                  NULL, 0, NULL);
2508                                spin_lock_bh(&efx->filter_lock);
2509                        }
2510
2511                        if (rc == 0) {
2512                                kfree(saved_spec);
2513                                saved_spec = NULL;
2514                                priv_flags = 0;
2515                        } else {
2516                                priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
2517                        }
2518                        efx_ef10_filter_set_entry(table, i, saved_spec,
2519                                                  priv_flags);
2520                }
2521        }
2522
2523        /* If successful, return the inserted filter ID */
2524        if (rc == 0)
2525                rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
2526
2527        wake_up_all(&table->waitq);
2528out_unlock:
2529        spin_unlock_bh(&efx->filter_lock);
2530        finish_wait(&table->waitq, &wait);
2531        return rc;
2532}
2533
2534static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
2535{
2536        /* no need to do anything here on EF10 */
2537}
2538
2539/* Remove a filter.
2540 * If !by_index, remove by ID.
2541 * If by_index, remove by index.
2542 * Filter ID may come from userland and must be range-checked.
2543 */
2544static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
2545                                           unsigned int priority_mask,
2546                                           u32 filter_id, bool by_index)
2547{
2548        unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2549        struct efx_ef10_filter_table *table = efx->filter_state;
2550        MCDI_DECLARE_BUF(inbuf,
2551                         MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2552                         MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2553        struct efx_filter_spec *spec;
2554        DEFINE_WAIT(wait);
2555        int rc;
2556
2557        /* Find the software table entry and mark it busy.  Don't
2558         * remove it yet; any attempt to update while we're waiting
2559         * for the firmware must find the busy entry.
2560         */
2561        for (;;) {
2562                spin_lock_bh(&efx->filter_lock);
2563                if (!(table->entry[filter_idx].spec &
2564                      EFX_EF10_FILTER_FLAG_BUSY))
2565                        break;
2566                prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2567                spin_unlock_bh(&efx->filter_lock);
2568                schedule();
2569        }
2570
2571        spec = efx_ef10_filter_entry_spec(table, filter_idx);
2572        if (!spec ||
2573            (!by_index &&
2574             efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
2575             filter_id / HUNT_FILTER_TBL_ROWS)) {
2576                rc = -ENOENT;
2577                goto out_unlock;
2578        }
2579
2580        if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
2581            priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
2582                /* Just remove flags */
2583                spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
2584                table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
2585                rc = 0;
2586                goto out_unlock;
2587        }
2588
2589        if (!(priority_mask & (1U << spec->priority))) {
2590                rc = -ENOENT;
2591                goto out_unlock;
2592        }
2593
2594        table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2595        spin_unlock_bh(&efx->filter_lock);
2596
2597        if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
2598                /* Reset to an automatic filter */
2599
2600                struct efx_filter_spec new_spec = *spec;
2601
2602                new_spec.priority = EFX_FILTER_PRI_AUTO;
2603                new_spec.flags = (EFX_FILTER_FLAG_RX |
2604                                  EFX_FILTER_FLAG_RX_RSS);
2605                new_spec.dmaq_id = 0;
2606                new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
2607                rc = efx_ef10_filter_push(efx, &new_spec,
2608                                          &table->entry[filter_idx].handle,
2609                                          true);
2610
2611                spin_lock_bh(&efx->filter_lock);
2612                if (rc == 0)
2613                        *spec = new_spec;
2614        } else {
2615                /* Really remove the filter */
2616
2617                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2618                               efx_ef10_filter_is_exclusive(spec) ?
2619                               MC_CMD_FILTER_OP_IN_OP_REMOVE :
2620                               MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2621                MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2622                               table->entry[filter_idx].handle);
2623                rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2624                                  inbuf, sizeof(inbuf), NULL, 0, NULL);
2625
2626                spin_lock_bh(&efx->filter_lock);
2627                if (rc == 0) {
2628                        kfree(spec);
2629                        efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2630                }
2631        }
2632
2633        table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2634        wake_up_all(&table->waitq);
2635out_unlock:
2636        spin_unlock_bh(&efx->filter_lock);
2637        finish_wait(&table->waitq, &wait);
2638        return rc;
2639}
2640
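    /* Remove a filter by its public ID.  The ID encodes the RX match
     * priority band (ID / HUNT_FILTER_TBL_ROWS) as well as the table
     * index (ID % HUNT_FILTER_TBL_ROWS), and removal only succeeds if
     * the stored priority is the one the caller names.
     */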
2641static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
2642                                       enum efx_filter_priority priority,
2643                                       u32 filter_id)
2644{
2645        return efx_ef10_filter_remove_internal(efx, 1U << priority,
2646                                               filter_id, false);
2647}
2648
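    /* Look up a filter by its public ID and copy its spec to the caller,
     * checking that the stored priority and match-priority band agree
     * with the ID.
     */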
2649static int efx_ef10_filter_get_safe(struct efx_nic *efx,
2650                                    enum efx_filter_priority priority,
2651                                    u32 filter_id, struct efx_filter_spec *spec)
2652{
2653        unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2654        struct efx_ef10_filter_table *table = efx->filter_state;
2655        const struct efx_filter_spec *saved_spec;
2656        int rc;
2657
2658        spin_lock_bh(&efx->filter_lock);
2659        saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
2660        if (saved_spec && saved_spec->priority == priority &&
2661            efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
2662            filter_id / HUNT_FILTER_TBL_ROWS) {
2663                *spec = *saved_spec;
2664                rc = 0;
2665        } else {
2666                rc = -ENOENT;
2667        }
2668        spin_unlock_bh(&efx->filter_lock);
2669        return rc;
2670}
2671
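    /* Remove all RX filters at or below the given priority, excluding
     * the automatic filters that back the device address lists.
     */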
2672static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
2673                                     enum efx_filter_priority priority)
2674{
2675        unsigned int priority_mask;
2676        unsigned int i;
2677        int rc;
2678
2679        priority_mask = (((1U << (priority + 1)) - 1) &
2680                         ~(1U << EFX_FILTER_PRI_AUTO));
2681
2682        for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
2683                rc = efx_ef10_filter_remove_internal(efx, priority_mask,
2684                                                     i, true);
2685                if (rc && rc != -ENOENT)
2686                        return rc;
2687        }
2688
2689        return 0;
2690}
2691
2692static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
2693                                         enum efx_filter_priority priority)
2694{
2695        struct efx_ef10_filter_table *table = efx->filter_state;
2696        unsigned int filter_idx;
2697        s32 count = 0;
2698
2699        spin_lock_bh(&efx->filter_lock);
2700        for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2701                if (table->entry[filter_idx].spec &&
2702                    efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
2703                    priority)
2704                        ++count;
2705        }
2706        spin_unlock_bh(&efx->filter_lock);
2707        return count;
2708}
2709
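    /* The public filter ID space is one HUNT_FILTER_TBL_ROWS-sized band
     * per supported RX match type, so the limit scales with the number
     * of match types reported by firmware.
     */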
2710static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
2711{
2712        struct efx_ef10_filter_table *table = efx->filter_state;
2713
2714        return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
2715}
2716
2717static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
2718                                      enum efx_filter_priority priority,
2719                                      u32 *buf, u32 size)
2720{
2721        struct efx_ef10_filter_table *table = efx->filter_state;
2722        struct efx_filter_spec *spec;
2723        unsigned int filter_idx;
2724        s32 count = 0;
2725
2726        spin_lock_bh(&efx->filter_lock);
2727        for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2728                spec = efx_ef10_filter_entry_spec(table, filter_idx);
2729                if (spec && spec->priority == priority) {
2730                        if (count == size) {
2731                                count = -EMSGSIZE;
2732                                break;
2733                        }
2734                        buf[count++] = (efx_ef10_filter_rx_match_pri(
2735                                                table, spec->match_flags) *
2736                                        HUNT_FILTER_TBL_ROWS +
2737                                        filter_idx);
2738                }
2739        }
2740        spin_unlock_bh(&efx->filter_lock);
2741        return count;
2742}
2743
2744#ifdef CONFIG_RFS_ACCEL
2745
2746static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
2747
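    /* Insert a filter for accelerated RFS.  This path must not sleep,
     * so the firmware request is issued asynchronously and finished in
     * efx_ef10_filter_rfs_insert_complete() below.
     */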
2748static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
2749                                      struct efx_filter_spec *spec)
2750{
2751        struct efx_ef10_filter_table *table = efx->filter_state;
2752        MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2753        struct efx_filter_spec *saved_spec;
2754        unsigned int hash, i, depth = 1;
2755        bool replacing = false;
2756        int ins_index = -1;
2757        u64 cookie;
2758        s32 rc;
2759
2760        /* Must be an RX filter without RSS and not for a multicast
2761         * destination address (RFS only works for connected sockets).
2762         * These restrictions allow us to pass only a tiny amount of
2763         * data through to the completion function.
2764         */
2765        EFX_WARN_ON_PARANOID(spec->flags !=
2766                             (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
2767        EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
2768        EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
2769
2770        hash = efx_ef10_filter_hash(spec);
2771
2772        spin_lock_bh(&efx->filter_lock);
2773
2774        /* Find any existing filter with the same match tuple or else
2775         * a free slot to insert at.  If an existing filter is busy,
2776         * we have to give up.
2777         */
2778        for (;;) {
2779                i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2780                saved_spec = efx_ef10_filter_entry_spec(table, i);
2781
2782                if (!saved_spec) {
2783                        if (ins_index < 0)
2784                                ins_index = i;
2785                } else if (efx_ef10_filter_equal(spec, saved_spec)) {
2786                        if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
2787                                rc = -EBUSY;
2788                                goto fail_unlock;
2789                        }
2790                        if (spec->priority < saved_spec->priority) {
2791                                rc = -EPERM;
2792                                goto fail_unlock;
2793                        }
2794                        ins_index = i;
2795                        break;
2796                }
2797
2798                /* Once we reach the maximum search depth, use the
2799                 * first suitable slot or return -EBUSY if there was
2800                 * none
2801                 */
2802                if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2803                        if (ins_index < 0) {
2804                                rc = -EBUSY;
2805                                goto fail_unlock;
2806                        }
2807                        break;
2808                }
2809
2810                ++depth;
2811        }
2812
2813        /* Create a software table entry if necessary, and mark it
2814         * busy.  We might yet fail to insert, but any attempt to
2815         * insert a conflicting filter while we're waiting for the
2816         * firmware must find the busy entry.
2817         */
2818        saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2819        if (saved_spec) {
2820                replacing = true;
2821        } else {
2822                saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2823                if (!saved_spec) {
2824                        rc = -ENOMEM;
2825                        goto fail_unlock;
2826                }
2827                *saved_spec = *spec;
2828        }
2829        efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2830                                  EFX_EF10_FILTER_FLAG_BUSY);
2831
2832        spin_unlock_bh(&efx->filter_lock);
2833
2834        /* Pack up the variables needed on completion */
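            /* Layout: bit 31 = replacing flag, bits 16 and up = table
             * index, bits 15:0 = destination queue index.
             */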
2835        cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
2836
2837        efx_ef10_filter_push_prep(efx, spec, inbuf,
2838                                  table->entry[ins_index].handle, replacing);
2839        efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2840                           MC_CMD_FILTER_OP_OUT_LEN,
2841                           efx_ef10_filter_rfs_insert_complete, cookie);
2842
2843        return ins_index;
2844
2845fail_unlock:
2846        spin_unlock_bh(&efx->filter_lock);
2847        return rc;
2848}
2849
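    /* Completion for the asynchronous RFS insert above: unpack the
     * cookie, record the new firmware handle on success, and free the
     * software entry if a fresh insertion failed.
     */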
2850static void
2851efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
2852                                    int rc, efx_dword_t *outbuf,
2853                                    size_t outlen_actual)
2854{
2855        struct efx_ef10_filter_table *table = efx->filter_state;
2856        unsigned int ins_index, dmaq_id;
2857        struct efx_filter_spec *spec;
2858        bool replacing;
2859
2860        /* Unpack the cookie */
2861        replacing = cookie >> 31;
2862        ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
2863        dmaq_id = cookie & 0xffff;
2864
2865        spin_lock_bh(&efx->filter_lock);
2866        spec = efx_ef10_filter_entry_spec(table, ins_index);
2867        if (rc == 0) {
2868                table->entry[ins_index].handle =
2869                        MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
2870                if (replacing)
2871                        spec->dmaq_id = dmaq_id;
2872        } else if (!replacing) {
2873                kfree(spec);
2874                spec = NULL;
2875        }
2876        efx_ef10_filter_set_entry(table, ins_index, spec, 0);
2877        spin_unlock_bh(&efx->filter_lock);
2878
2879        wake_up_all(&table->waitq);
2880}
2881
2882static void
2883efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2884                                    unsigned long filter_idx,
2885                                    int rc, efx_dword_t *outbuf,
2886                                    size_t outlen_actual);
2887
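    /* Called from the ARFS expiry scan.  If this is an RFS (HINT)
     * filter that the stack agrees may expire, issue an asynchronous
     * remove and mark the entry busy until the completion runs.
     */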
2888static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2889                                           unsigned int filter_idx)
2890{
2891        struct efx_ef10_filter_table *table = efx->filter_state;
2892        struct efx_filter_spec *spec =
2893                efx_ef10_filter_entry_spec(table, filter_idx);
2894        MCDI_DECLARE_BUF(inbuf,
2895                         MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2896                         MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2897
2898        if (!spec ||
2899            (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
2900            spec->priority != EFX_FILTER_PRI_HINT ||
2901            !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
2902                                 flow_id, filter_idx))
2903                return false;
2904
2905        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2906                       MC_CMD_FILTER_OP_IN_OP_REMOVE);
2907        MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2908                       table->entry[filter_idx].handle);
2909        if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
2910                               efx_ef10_filter_rfs_expire_complete, filter_idx))
2911                return false;
2912
2913        table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2914        return true;
2915}
2916
2917static void
2918efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2919                                    unsigned long filter_idx,
2920                                    int rc, efx_dword_t *outbuf,
2921                                    size_t outlen_actual)
2922{
2923        struct efx_ef10_filter_table *table = efx->filter_state;
2924        struct efx_filter_spec *spec =
2925                efx_ef10_filter_entry_spec(table, filter_idx);
2926
2927        spin_lock_bh(&efx->filter_lock);
2928        if (rc == 0) {
2929                kfree(spec);
2930                efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2931        }
2932        table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2933        wake_up_all(&table->waitq);
2934        spin_unlock_bh(&efx->filter_lock);
2935}
2936
2937#endif /* CONFIG_RFS_ACCEL */
2938
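    /* Translate a firmware match-field bitmask into the driver's
     * efx_filter_match_flags.  Returns -EINVAL if any firmware bit has
     * no driver equivalent.
     */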
2939static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
2940{
2941        int match_flags = 0;
2942
2943#define MAP_FLAG(gen_flag, mcdi_field) {                                \
2944                u32 old_mcdi_flags = mcdi_flags;                        \
2945                mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ##      \
2946                                mcdi_field ## _LBN);                    \
2947                if (mcdi_flags != old_mcdi_flags)                       \
2948                        match_flags |= EFX_FILTER_MATCH_ ## gen_flag;   \
2949        }
2950        MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
2951        MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
2952        MAP_FLAG(REM_HOST, SRC_IP);
2953        MAP_FLAG(LOC_HOST, DST_IP);
2954        MAP_FLAG(REM_MAC, SRC_MAC);
2955        MAP_FLAG(REM_PORT, SRC_PORT);
2956        MAP_FLAG(LOC_MAC, DST_MAC);
2957        MAP_FLAG(LOC_PORT, DST_PORT);
2958        MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
2959        MAP_FLAG(INNER_VID, INNER_VLAN);
2960        MAP_FLAG(OUTER_VID, OUTER_VLAN);
2961        MAP_FLAG(IP_PROTO, IP_PROTO);
2962#undef MAP_FLAG
2963
2964        /* Did we map them all? */
2965        if (mcdi_flags)
2966                return -EINVAL;
2967
2968        return match_flags;
2969}
2970
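    /* Allocate the software filter table and query firmware for the
     * supported RX match types, recording them in priority order.
     */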
2971static int efx_ef10_filter_table_probe(struct efx_nic *efx)
2972{
2973        MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
2974        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
2975        unsigned int pd_match_pri, pd_match_count;
2976        struct efx_ef10_filter_table *table;
2977        size_t outlen;
2978        int rc;
2979
2980        table = kzalloc(sizeof(*table), GFP_KERNEL);
2981        if (!table)
2982                return -ENOMEM;
2983
2984        /* Find out which RX filter types are supported, and their priorities */
2985        MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
2986                       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
2987        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
2988                          inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
2989                          &outlen);
2990        if (rc)
2991                goto fail;
2992        pd_match_count = MCDI_VAR_ARRAY_LEN(
2993                outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
2994        table->rx_match_count = 0;
2995
2996        for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
2997                u32 mcdi_flags =
2998                        MCDI_ARRAY_DWORD(
2999                                outbuf,
3000                                GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
3001                                pd_match_pri);
3002                rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
3003                if (rc < 0) {
3004                        netif_dbg(efx, probe, efx->net_dev,
3005                                  "%s: fw flags %#x pri %u not supported in driver\n",
3006                                  __func__, mcdi_flags, pd_match_pri);
3007                } else {
3008                        netif_dbg(efx, probe, efx->net_dev,
3009                                  "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
3010                                  __func__, mcdi_flags, pd_match_pri,
3011                                  rc, table->rx_match_count);
3012                        table->rx_match_flags[table->rx_match_count++] = rc;
3013                }
3014        }
3015
3016        table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
3017        if (!table->entry) {
3018                rc = -ENOMEM;
3019                goto fail;
3020        }
3021
3022        efx->filter_state = table;
3023        init_waitqueue_head(&table->waitq);
3024        return 0;
3025
3026fail:
3027        kfree(table);
3028        return rc;
3029}
3030
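    /* Re-insert all filters from the software table into the firmware,
     * e.g. after an MC reboot has discarded the previous handles.
     */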
3031static void efx_ef10_filter_table_restore(struct efx_nic *efx)
3032{
3033        struct efx_ef10_filter_table *table = efx->filter_state;
3034        struct efx_ef10_nic_data *nic_data = efx->nic_data;
3035        struct efx_filter_spec *spec;
3036        unsigned int filter_idx;
3037        bool failed = false;
3038        int rc;
3039
3040        if (!nic_data->must_restore_filters)
3041                return;
3042
3043        spin_lock_bh(&efx->filter_lock);
3044
3045        for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3046                spec = efx_ef10_filter_entry_spec(table, filter_idx);
3047                if (!spec)
3048                        continue;
3049
3050                table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3051                spin_unlock_bh(&efx->filter_lock);
3052
3053                rc = efx_ef10_filter_push(efx, spec,
3054                                          &table->entry[filter_idx].handle,
3055                                          false);
3056                if (rc)
3057                        failed = true;
3058
3059                spin_lock_bh(&efx->filter_lock);
3060                if (rc) {
3061                        kfree(spec);
3062                        efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3063                } else {
3064                        table->entry[filter_idx].spec &=
3065                                ~EFX_EF10_FILTER_FLAG_BUSY;
3066                }
3067        }
3068
3069        spin_unlock_bh(&efx->filter_lock);
3070
3071        if (failed)
3072                netif_err(efx, hw, efx->net_dev,
3073                          "unable to restore all filters\n");
3074        else
3075                nic_data->must_restore_filters = false;
3076}
3077
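    /* Tell firmware to remove or unsubscribe every installed filter,
     * then free the software table.
     */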
3078static void efx_ef10_filter_table_remove(struct efx_nic *efx)
3079{
3080        struct efx_ef10_filter_table *table = efx->filter_state;
3081        MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3082        struct efx_filter_spec *spec;
3083        unsigned int filter_idx;
3084        int rc;
3085
3086        for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3087                spec = efx_ef10_filter_entry_spec(table, filter_idx);
3088                if (!spec)
3089                        continue;
3090
3091                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3092                               efx_ef10_filter_is_exclusive(spec) ?
3093                               MC_CMD_FILTER_OP_IN_OP_REMOVE :
3094                               MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3095                MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3096                               table->entry[filter_idx].handle);
3097                rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3098                                  NULL, 0, NULL);
3099                if (rc)
3100                        netdev_WARN(efx->net_dev,
3101                                    "failed to remove filter_idx=%#x handle=%#llx\n",
3102                                    filter_idx,
3103                                    table->entry[filter_idx].handle);
3104                kfree(spec);
3105        }
3106
3107        vfree(table->entry);
3108        kfree(table);
3109}
3110
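    /* Synchronise the hardware filters with the net_device address lists
     * using mark and sweep: mark the current address filters AUTO_OLD,
     * insert or renew filters for the new unicast/multicast lists
     * (falling back to the default unknown-unicast/multicast filters
     * when a list overflows or promiscuous mode is set), then remove
     * whatever is still marked old.
     */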
3111static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
3112{
3113        struct efx_ef10_filter_table *table = efx->filter_state;
3114        struct net_device *net_dev = efx->net_dev;
3115        struct efx_filter_spec spec;
3116        bool remove_failed = false;
3117        struct netdev_hw_addr *uc;
3118        struct netdev_hw_addr *mc;
3119        unsigned int filter_idx;
3120        int i, n, rc;
3121
3122        if (!efx_dev_registered(efx))
3123                return;
3124
3125        /* Mark old filters that may need to be removed */
3126        spin_lock_bh(&efx->filter_lock);
3127        n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
3128        for (i = 0; i < n; i++) {
3129                filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
3130                table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
3131        }
3132        n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
3133        for (i = 0; i < n; i++) {
3134                filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
3135                table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
3136        }
3137        spin_unlock_bh(&efx->filter_lock);
3138
3139        /* Copy/convert the address lists; add the primary station
3140         * address and broadcast address
3141         */
3142        netif_addr_lock_bh(net_dev);
3143        if (net_dev->flags & IFF_PROMISC ||
3144            netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
3145                table->dev_uc_count = -1;
3146        } else {
3147                table->dev_uc_count = 1 + netdev_uc_count(net_dev);
3148                memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr,
3149                       ETH_ALEN);
3150                i = 1;
3151                netdev_for_each_uc_addr(uc, net_dev) {
3152                        memcpy(table->dev_uc_list[i].addr,
3153                               uc->addr, ETH_ALEN);
3154                        i++;
3155                }
3156        }
3157        if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
3158            netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
3159                table->dev_mc_count = -1;
3160        } else {
3161                table->dev_mc_count = 1 + netdev_mc_count(net_dev);
3162                eth_broadcast_addr(table->dev_mc_list[0].addr);
3163                i = 1;
3164                netdev_for_each_mc_addr(mc, net_dev) {
3165                        memcpy(table->dev_mc_list[i].addr,
3166                               mc->addr, ETH_ALEN);
3167                        i++;
3168                }
3169        }
3170        netif_addr_unlock_bh(net_dev);
3171
3172        /* Insert/renew unicast filters */
3173        if (table->dev_uc_count >= 0) {
3174                for (i = 0; i < table->dev_uc_count; i++) {
3175                        efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
3176                                           EFX_FILTER_FLAG_RX_RSS,
3177                                           0);
3178                        efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
3179                                                 table->dev_uc_list[i].addr);
3180                        rc = efx_ef10_filter_insert(efx, &spec, true);
3181                        if (rc < 0) {
3182                                /* Fall back to unicast-promisc */
3183                                while (i--)
3184                                        efx_ef10_filter_remove_safe(
3185                                                efx, EFX_FILTER_PRI_AUTO,
3186                                                table->dev_uc_list[i].id);
3187                                table->dev_uc_count = -1;
3188                                break;
3189                        }
3190                        table->dev_uc_list[i].id = rc;
3191                }
3192        }
3193        if (table->dev_uc_count < 0) {
3194                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
3195                                   EFX_FILTER_FLAG_RX_RSS,
3196                                   0);
3197                efx_filter_set_uc_def(&spec);
3198                rc = efx_ef10_filter_insert(efx, &spec, true);
3199                if (rc < 0) {
3200                        WARN_ON(1);
3201                        table->dev_uc_count = 0;
3202                } else {
3203                        table->dev_uc_list[0].id = rc;
3204                }
3205        }
3206
3207        /* Insert/renew multicast filters */
3208        if (table->dev_mc_count >= 0) {
3209                for (i = 0; i < table->dev_mc_count; i++) {
3210                        efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
3211                                           EFX_FILTER_FLAG_RX_RSS,
3212                                           0);
3213                        efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
3214                                                 table->dev_mc_list[i].addr);
3215                        rc = efx_ef10_filter_insert(efx, &spec, true);
3216                        if (rc < 0) {
3217                                /* Fall back to multicast-promisc */
3218                                while (i--)
3219                                        efx_ef10_filter_remove_safe(
3220                                                efx, EFX_FILTER_PRI_AUTO,
3221                                                table->dev_mc_list[i].id);
3222                                table->dev_mc_count = -1;
3223                                break;
3224                        }
3225                        table->dev_mc_list[i].id = rc;
3226                }
3227        }
3228        if (table->dev_mc_count < 0) {
3229                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
3230                                   EFX_FILTER_FLAG_RX_RSS,
3231                                   0);
3232                efx_filter_set_mc_def(&spec);
3233                rc = efx_ef10_filter_insert(efx, &spec, true);
3234                if (rc < 0) {
3235                        WARN_ON(1);
3236                        table->dev_mc_count = 0;
3237                } else {
3238                        table->dev_mc_list[0].id = rc;
3239                }
3240        }
3241
3242        /* Remove filters that weren't renewed.  Since nothing else
3243         * changes the AUTO_OLD flag or removes these filters, we
3244         * don't need to hold the filter_lock while scanning for
3245         * these filters.
3246         */
3247        for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
3248                if (ACCESS_ONCE(table->entry[i].spec) &
3249                    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
3250                        if (efx_ef10_filter_remove_internal(
3251                                    efx, 1U << EFX_FILTER_PRI_AUTO,
3252                                    i, true) < 0)
3253                                remove_failed = true;
3254                }
3255        }
3256        WARN_ON(remove_failed);
3257}
3258
3259static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
3260{
3261        efx_ef10_filter_sync_rx_mode(efx);
3262
3263        return efx_mcdi_set_mac(efx);
3264}
3265
3266static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
3267{
3268        MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
3269
3270        MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
3271        return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
3272                            NULL, 0, NULL);
3273}
3274
3275/* MC BISTs follow a different poll mechanism from PHY BISTs.
3276 * The BIST runs in the MC's poll handler, so the MCDI command
3277 * blocks until the BIST has finished.
3278 */
3279static int efx_ef10_poll_bist(struct efx_nic *efx)
3280{
3281        int rc;
3282        MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
3283        size_t outlen;
3284        u32 result;
3285
3286        rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
3287                           outbuf, sizeof(outbuf), &outlen);
3288        if (rc != 0)
3289                return rc;
3290
3291        if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
3292                return -EIO;
3293
3294        result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
3295        switch (result) {
3296        case MC_CMD_POLL_BIST_PASSED:
3297                netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
3298                return 0;
3299        case MC_CMD_POLL_BIST_TIMEOUT:
3300                netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
3301                return -EIO;
3302        case MC_CMD_POLL_BIST_FAILED:
3303                netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
3304                return -EIO;
3305        default:
3306                netif_err(efx, hw, efx->net_dev,
3307                          "BIST returned unknown result %u\n", result);
3308                return -EIO;
3309        }
3310}
3311
3312static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
3313{
3314        int rc;
3315
3316        netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
3317
3318        rc = efx_ef10_start_bist(efx, bist_type);
3319        if (rc != 0)
3320                return rc;
3321
3322        return efx_ef10_poll_bist(efx);
3323}
3324
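    /* Offline chip self-test: quiesce the port, switch the MC into
     * offline BIST mode, run the memory and register BISTs, then reset
     * the NIC to return to normal operation.
     */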
3325static int
3326efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
3327{
3328        int rc, rc2;
3329
3330        efx_reset_down(efx, RESET_TYPE_WORLD);
3331
3332        rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
3333                          NULL, 0, NULL, 0, NULL);
3334        if (rc != 0)
3335                goto out;
3336
3337        tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
3338        tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
3339
3340        rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
3341
3342out:
3343        rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
3344        return rc ? rc : rc2;
3345}
3346
3347#ifdef CONFIG_SFC_MTD
3348
3349struct efx_ef10_nvram_type_info {
3350        u16 type, type_mask;
3351        u8 port;
3352        const char *name;
3353};
3354
3355static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
3356        { NVRAM_PARTITION_TYPE_MC_FIRMWARE,        0,    0, "sfc_mcfw" },
3357        { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0,    0, "sfc_mcfw_backup" },
3358        { NVRAM_PARTITION_TYPE_EXPANSION_ROM,      0,    0, "sfc_exp_rom" },
3359        { NVRAM_PARTITION_TYPE_STATIC_CONFIG,      0,    0, "sfc_static_cfg" },
3360        { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,     0,    0, "sfc_dynamic_cfg" },
3361        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0,   0, "sfc_exp_rom_cfg" },
3362        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,   1, "sfc_exp_rom_cfg" },
3363        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,   2, "sfc_exp_rom_cfg" },
3364        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,   3, "sfc_exp_rom_cfg" },
3365        { NVRAM_PARTITION_TYPE_LICENSE,            0,    0, "sfc_license" },
3366        { NVRAM_PARTITION_TYPE_PHY_MIN,            0xff, 0, "sfc_phy_fw" },
3367};
3368
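    /* Describe one NVRAM partition as an MTD device, or return -ENODEV
     * for partitions we do not expose (unknown types, the other port's
     * partitions, or write-protected ones).
     */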
3369static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
3370                                        struct efx_mcdi_mtd_partition *part,
3371                                        unsigned int type)
3372{
3373        MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
3374        MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
3375        const struct efx_ef10_nvram_type_info *info;
3376        size_t size, erase_size, outlen;
3377        bool protected;
3378        int rc;
3379
3380        for (info = efx_ef10_nvram_types; ; info++) {
3381                if (info ==
3382                    efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
3383                        return -ENODEV;
3384                if ((type & ~info->type_mask) == info->type)
3385                        break;
3386        }
3387        if (info->port != efx_port_num(efx))
3388                return -ENODEV;
3389
3390        rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
3391        if (rc)
3392                return rc;
3393        if (protected)
3394                return -ENODEV; /* hide it */
3395
3396        part->nvram_type = type;
3397
3398        MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
3399        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
3400                          outbuf, sizeof(outbuf), &outlen);
3401        if (rc)
3402                return rc;
3403        if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
3404                return -EIO;
3405        if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
3406            (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
3407                part->fw_subtype = MCDI_DWORD(outbuf,
3408                                              NVRAM_METADATA_OUT_SUBTYPE);
3409
3410        part->common.dev_type_name = "EF10 NVRAM manager";
3411        part->common.type_name = info->name;
3412
3413        part->common.mtd.type = MTD_NORFLASH;
3414        part->common.mtd.flags = MTD_CAP_NORFLASH;
3415        part->common.mtd.size = size;
3416        part->common.mtd.erasesize = erase_size;
3417
3418        return 0;
3419}
3420
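    /* Enumerate NVRAM partitions via MC_CMD_NVRAM_PARTITIONS and
     * register an MTD device for each partition we expose.
     */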
3421static int efx_ef10_mtd_probe(struct efx_nic *efx)
3422{
3423        MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
3424        struct efx_mcdi_mtd_partition *parts;
3425        size_t outlen, n_parts_total, i, n_parts;
3426        unsigned int type;
3427        int rc;
3428
3429        ASSERT_RTNL();
3430
3431        BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
3432        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
3433                          outbuf, sizeof(outbuf), &outlen);
3434        if (rc)
3435                return rc;
3436        if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
3437                return -EIO;
3438
3439        n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
3440        if (n_parts_total >
3441            MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
3442                return -EIO;
3443
3444        parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
3445        if (!parts)
3446                return -ENOMEM;
3447
3448        n_parts = 0;
3449        for (i = 0; i < n_parts_total; i++) {
3450                type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
3451                                        i);
3452                rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
3453                if (rc == 0)
3454                        n_parts++;
3455                else if (rc != -ENODEV)
3456                        goto fail;
3457        }
3458
3459        rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
3460fail:
3461        if (rc)
3462                kfree(parts);
3463        return rc;
3464}
3465
3466#endif /* CONFIG_SFC_MTD */
3467
3468static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
3469{
3470        _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
3471}
3472
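    /* Subscribe this channel's event queue to PTP time sync events.
     * When "temp" is set, a channel that is fully disabled is left
     * alone rather than being enabled.
     */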
3473static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
3474                                           bool temp)
3475{
3476        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
3477        int rc;
3478
3479        if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
3480            channel->sync_events_state == SYNC_EVENTS_VALID ||
3481            (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
3482                return 0;
3483        channel->sync_events_state = SYNC_EVENTS_REQUESTED;
3484
3485        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
3486        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
3487        MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
3488                       channel->channel);
3489
3490        rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
3491                          inbuf, sizeof(inbuf), NULL, 0, NULL);
3492
3493        if (rc != 0)
3494                channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
3495                                                    SYNC_EVENTS_DISABLED;
3496
3497        return rc;
3498}
3499
3500static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
3501                                            bool temp)
3502{
3503        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
3504        int rc;
3505
3506        if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
3507            (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
3508                return 0;
3509        if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
3510                channel->sync_events_state = SYNC_EVENTS_DISABLED;
3511                return 0;
3512        }
3513        channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
3514                                            SYNC_EVENTS_DISABLED;
3515
3516        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
3517        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
3518        MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
3519                       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
3520        MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
3521                       channel->channel);
3522
3523        rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
3524                          inbuf, sizeof(inbuf), NULL, 0, NULL);
3525
3526        return rc;
3527}
3528
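    /* Enable or disable time sync events on every channel.  If enabling
     * fails part-way, roll back by disabling them all again.
     */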
3529static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
3530                                           bool temp)
3531{
3532        int (*set)(struct efx_channel *channel, bool temp);
3533        struct efx_channel *channel;
3534
3535        set = en ?
3536              efx_ef10_rx_enable_timestamping :
3537              efx_ef10_rx_disable_timestamping;
3538
3539        efx_for_each_channel(channel, efx) {
3540                int rc = set(channel, temp);
3541                if (en && rc != 0) {
3542                        efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
3543                        return rc;
3544                }
3545        }
3546
3547        return 0;
3548}
3549
3550static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
3551                                      struct hwtstamp_config *init)
3552{
3553        int rc;
3554
3555        switch (init->rx_filter) {
3556        case HWTSTAMP_FILTER_NONE:
3557                efx_ef10_ptp_set_ts_sync_events(efx, false, false);
3558                /* if TX timestamping is still requested then leave PTP on */
3559                return efx_ptp_change_mode(efx,
3560                                           init->tx_type != HWTSTAMP_TX_OFF, 0);
3561        case HWTSTAMP_FILTER_ALL:
3562        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3563        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3564        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3565        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3566        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3567        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3568        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3569        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3570        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3571        case HWTSTAMP_FILTER_PTP_V2_EVENT:
3572        case HWTSTAMP_FILTER_PTP_V2_SYNC:
3573        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3574                init->rx_filter = HWTSTAMP_FILTER_ALL;
3575                rc = efx_ptp_change_mode(efx, true, 0);
3576                if (!rc)
3577                        rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
3578                if (rc)
3579                        efx_ptp_change_mode(efx, false, 0);
3580                return rc;
3581        default:
3582                return -ERANGE;
3583        }
3584}
3585
3586const struct efx_nic_type efx_hunt_a0_nic_type = {
3587        .mem_map_size = efx_ef10_mem_map_size,
3588        .probe = efx_ef10_probe,
3589        .remove = efx_ef10_remove,
3590        .dimension_resources = efx_ef10_dimension_resources,
3591        .init = efx_ef10_init_nic,
3592        .fini = efx_port_dummy_op_void,
3593        .map_reset_reason = efx_mcdi_map_reset_reason,
3594        .map_reset_flags = efx_ef10_map_reset_flags,
3595        .reset = efx_ef10_reset,
3596        .probe_port = efx_mcdi_port_probe,
3597        .remove_port = efx_mcdi_port_remove,
3598        .fini_dmaq = efx_ef10_fini_dmaq,
3599        .describe_stats = efx_ef10_describe_stats,
3600        .update_stats = efx_ef10_update_stats,
3601        .start_stats = efx_mcdi_mac_start_stats,
3602        .pull_stats = efx_mcdi_mac_pull_stats,
3603        .stop_stats = efx_mcdi_mac_stop_stats,
3604        .set_id_led = efx_mcdi_set_id_led,
3605        .push_irq_moderation = efx_ef10_push_irq_moderation,
3606        .reconfigure_mac = efx_ef10_mac_reconfigure,
3607        .check_mac_fault = efx_mcdi_mac_check_fault,
3608        .reconfigure_port = efx_mcdi_port_reconfigure,
3609        .get_wol = efx_ef10_get_wol,
3610        .set_wol = efx_ef10_set_wol,
3611        .resume_wol = efx_port_dummy_op_void,
3612        .test_chip = efx_ef10_test_chip,
3613        .test_nvram = efx_mcdi_nvram_test_all,
3614        .mcdi_request = efx_ef10_mcdi_request,
3615        .mcdi_poll_response = efx_ef10_mcdi_poll_response,
3616        .mcdi_read_response = efx_ef10_mcdi_read_response,
3617        .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
3618        .irq_enable_master = efx_port_dummy_op_void,
3619        .irq_test_generate = efx_ef10_irq_test_generate,
3620        .irq_disable_non_ev = efx_port_dummy_op_void,
3621        .irq_handle_msi = efx_ef10_msi_interrupt,
3622        .irq_handle_legacy = efx_ef10_legacy_interrupt,
3623        .tx_probe = efx_ef10_tx_probe,
3624        .tx_init = efx_ef10_tx_init,
3625        .tx_remove = efx_ef10_tx_remove,
3626        .tx_write = efx_ef10_tx_write,
3627        .rx_push_rss_config = efx_ef10_rx_push_rss_config,
3628        .rx_probe = efx_ef10_rx_probe,
3629        .rx_init = efx_ef10_rx_init,
3630        .rx_remove = efx_ef10_rx_remove,
3631        .rx_write = efx_ef10_rx_write,
3632        .rx_defer_refill = efx_ef10_rx_defer_refill,
3633        .ev_probe = efx_ef10_ev_probe,
3634        .ev_init = efx_ef10_ev_init,
3635        .ev_fini = efx_ef10_ev_fini,
3636        .ev_remove = efx_ef10_ev_remove,
3637        .ev_process = efx_ef10_ev_process,
3638        .ev_read_ack = efx_ef10_ev_read_ack,
3639        .ev_test_generate = efx_ef10_ev_test_generate,
3640        .filter_table_probe = efx_ef10_filter_table_probe,
3641        .filter_table_restore = efx_ef10_filter_table_restore,
3642        .filter_table_remove = efx_ef10_filter_table_remove,
3643        .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
3644        .filter_insert = efx_ef10_filter_insert,
3645        .filter_remove_safe = efx_ef10_filter_remove_safe,
3646        .filter_get_safe = efx_ef10_filter_get_safe,
3647        .filter_clear_rx = efx_ef10_filter_clear_rx,
3648        .filter_count_rx_used = efx_ef10_filter_count_rx_used,
3649        .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
3650        .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
3651#ifdef CONFIG_RFS_ACCEL
3652        .filter_rfs_insert = efx_ef10_filter_rfs_insert,
3653        .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
3654#endif
3655#ifdef CONFIG_SFC_MTD
3656        .mtd_probe = efx_ef10_mtd_probe,
3657        .mtd_rename = efx_mcdi_mtd_rename,
3658        .mtd_read = efx_mcdi_mtd_read,
3659        .mtd_erase = efx_mcdi_mtd_erase,
3660        .mtd_write = efx_mcdi_mtd_write,
3661        .mtd_sync = efx_mcdi_mtd_sync,
3662#endif
3663        .ptp_write_host_time = efx_ef10_ptp_write_host_time,
3664        .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
3665        .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
3666
3667        .revision = EFX_REV_HUNT_A0,
3668        .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
3669        .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
3670        .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
3671        .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
3672        .can_rx_scatter = true,
3673        .always_rx_scatter = true,
3674        .max_interrupt_mode = EFX_INT_MODE_MSIX,
3675        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
3676        .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3677                             NETIF_F_RXHASH | NETIF_F_NTUPLE),
3678        .mcdi_max_ver = 2,
3679        .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
3680        .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
3681                            1 << HWTSTAMP_FILTER_ALL,
3682};
3683