linux/drivers/net/ethernet/sfc/ef10.c
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 */

#include "net_driver.h"
#include "rx_common.h"
#include "tx_common.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_port.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "nic.h"
#include "mcdi_filters.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/udp_tunnel.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV              7
enum {
        EFX_EF10_TEST = 1,
        EFX_EF10_REFILL,
};

/* VLAN list entry */
struct efx_ef10_vlan {
        struct list_head list;
        u16 vid;
};

static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels;

static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
        efx_dword_t reg;

        efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
        return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
                EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}
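
/* Sketch of how the warm boot count is used (the reading of the magic
 * number is ours): the MC publishes 0xb007 ("boot") in the upper word of
 * ER_DZ_BIU_MC_SFT_STATUS once booted, with a boot counter in the lower
 * word.  Callers cache the count and re-read it to detect an MC reboot:
 *
 *        rc = efx_ef10_get_warm_boot_count(efx);
 *        if (rc >= 0 && rc != nic_data->warm_boot_count)
 *                ... MC has rebooted since probe; handle the reset ...
 */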

/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
 * I/O space and BAR 2(&3) for memory.  On SFC9250 (Medford2), there is no I/O
 * bar; PFs use BAR 0/1 for memory.
 */
static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx)
{
        switch (efx->pci_dev->device) {
        case 0x0b03: /* SFC9250 PF */
                return 0;
        default:
                return 2;
        }
}

/* All VFs use BAR 0/1 for memory */
static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx)
{
        return 0;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
        int bar;

        bar = efx->type->mem_bar(efx);
        return resource_size(&efx->pci_dev->resource[bar]);
}
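
/* Note: the memory BAR size bounds the usable VI count; efx_ef10_probe()
 * below derives max_vis as efx_ef10_mem_map_size(efx) / efx->vi_stride.
 */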

static bool efx_ef10_is_vf(struct efx_nic *efx)
{
        return efx->type->is_vf;
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_get_vf_index(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
                          sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < sizeof(outbuf))
                return -EIO;

        nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
        return 0;
}
#endif

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
                netif_err(efx, drv, efx->net_dev,
                          "unable to read datapath firmware capabilities\n");
                return -EIO;
        }

        nic_data->datapath_caps =
                MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

        if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
                nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
                                GET_CAPABILITIES_V2_OUT_FLAGS2);
                nic_data->piobuf_size = MCDI_WORD(outbuf,
                                GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
        } else {
                nic_data->datapath_caps2 = 0;
                nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
        }

        /* Record the DPCPU firmware IDs to determine VEB vswitching support. */
        nic_data->rx_dpcpu_fw_id =
                MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
        nic_data->tx_dpcpu_fw_id =
                MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

        if (!(nic_data->datapath_caps &
              (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
                netif_err(efx, probe, efx->net_dev,
                          "current firmware does not support an RX prefix\n");
                return -ENODEV;
        }

        if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
                u8 vi_window_mode = MCDI_BYTE(outbuf,
                                GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);

                rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
                if (rc)
                        return rc;
        } else {
                /* keep default VI stride */
                netif_dbg(efx, probe, efx->net_dev,
                          "firmware did not report VI window mode, assuming vi_stride = %u\n",
                          efx->vi_stride);
        }

        if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
                efx->num_mac_stats = MCDI_WORD(outbuf,
                                GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
                netif_dbg(efx, probe, efx->net_dev,
                          "firmware reports num_mac_stats = %u\n",
                          efx->num_mac_stats);
        } else {
                /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */
                netif_dbg(efx, probe, efx->net_dev,
                          "firmware did not report num_mac_stats, assuming %u\n",
                          efx->num_mac_stats);
        }

        return 0;
}
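
/* Individual capability bits are tested against their _LBN positions
 * from mcdi_pcol.h; the pattern used throughout this file is e.g.:
 *
 *        if (nic_data->datapath_caps &
 *            (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
 *                ... firmware can include the FCS in received frames ...
 */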

static void efx_ef10_read_licensed_features(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
                       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
                                outbuf, sizeof(outbuf), &outlen);
        if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN))
                return;

        nic_data->licensed_features = MCDI_QWORD(outbuf,
                                         LICENSING_V3_OUT_LICENSED_FEATURES);
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
                          outbuf, sizeof(outbuf), NULL);
        if (rc)
                return rc;
        rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
        return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_timer_workarounds(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        unsigned int implemented;
        unsigned int enabled;
        int rc;

        nic_data->workaround_35388 = false;
        nic_data->workaround_61265 = false;

        rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);

        if (rc == -ENOSYS) {
                /* Firmware without GET_WORKAROUNDS - not a problem. */
                rc = 0;
        } else if (rc == 0) {
                /* Bug61265 workaround is always enabled if implemented. */
                if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265)
                        nic_data->workaround_61265 = true;

                if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
                        nic_data->workaround_35388 = true;
                } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) {
                        /* Workaround is implemented but not enabled.
                         * Try to enable it.
                         */
                        rc = efx_mcdi_set_workaround(efx,
                                                     MC_CMD_WORKAROUND_BUG35388,
                                                     true, NULL);
                        if (rc == 0)
                                nic_data->workaround_35388 = true;
                        /* If we failed to set the workaround just carry on. */
                        rc = 0;
                }
        }

        netif_dbg(efx, probe, efx->net_dev,
                  "workaround for bug 35388 is %sabled\n",
                  nic_data->workaround_35388 ? "en" : "dis");
        netif_dbg(efx, probe, efx->net_dev,
                  "workaround for bug 61265 is %sabled\n",
                  nic_data->workaround_61265 ? "en" : "dis");

        return rc;
}

static void efx_ef10_process_timer_config(struct efx_nic *efx,
                                          const efx_dword_t *data)
{
        unsigned int max_count;

        if (EFX_EF10_WORKAROUND_61265(efx)) {
                efx->timer_quantum_ns = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS);
                efx->timer_max_ns = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS);
        } else if (EFX_EF10_WORKAROUND_35388(efx)) {
                efx->timer_quantum_ns = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT);
                max_count = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT);
                efx->timer_max_ns = max_count * efx->timer_quantum_ns;
        } else {
                efx->timer_quantum_ns = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT);
                max_count = MCDI_DWORD(data,
                        GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT);
                efx->timer_max_ns = max_count * efx->timer_quantum_ns;
        }

        netif_dbg(efx, probe, efx->net_dev,
                  "got timer properties from MC: quantum %u ns; max %u ns\n",
                  efx->timer_quantum_ns, efx->timer_max_ns);
}

static int efx_ef10_get_timer_config(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN);
        int rc;

        rc = efx_ef10_get_timer_workarounds(efx);
        if (rc)
                return rc;

        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0,
                                outbuf, sizeof(outbuf), NULL);

        if (rc == 0) {
                efx_ef10_process_timer_config(efx, outbuf);
        } else if (rc == -ENOSYS || rc == -EPERM) {
                /* Not available - fall back to Huntington defaults. */
                unsigned int quantum;

                rc = efx_ef10_get_sysclk_freq(efx);
                if (rc < 0)
                        return rc;

                quantum = 1536000 / rc; /* 1536 cycles */
                efx->timer_quantum_ns = quantum;
                efx->timer_max_ns = efx->type->timer_period_max * quantum;
                rc = 0;
        } else {
                efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES,
                                       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN,
                                       NULL, 0, rc);
        }

        return rc;
}
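
/* Worked example for the fallback path (illustrative frequency only): if
 * the MC reports a 800 MHz system clock, the Huntington default quantum
 * is 1536000 / 800 = 1920 ns (i.e. 1536 cycles), and timer_max_ns then
 * becomes efx->type->timer_period_max * 1920 ns.
 */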

static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
        size_t outlen;
        int rc;

        BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
                return -EIO;

        ether_addr_copy(mac_address,
                        MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
        return 0;
}

static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
        size_t outlen;
        int num_addrs, rc;

        MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
                       EVB_PORT_ID_ASSIGNED);
        rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
                          sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);

        if (rc)
                return rc;
        if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
                return -EIO;

        num_addrs = MCDI_DWORD(outbuf,
                               VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);

        WARN_ON(num_addrs != 1);

        ether_addr_copy(mac_address,
                        MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));

        return 0;
}

static ssize_t link_control_flag_show(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct efx_nic *efx = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n",
                       ((efx->mcdi->fn_flags) &
                        (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
                       ? 1 : 0);
}

static ssize_t primary_flag_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct efx_nic *efx = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n",
                       ((efx->mcdi->fn_flags) &
                        (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
                       ? 1 : 0);
}
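
/* These show functions back the read-only sysfs attributes declared with
 * DEVICE_ATTR_RO() below; efx_ef10_probe() attaches them to the PCI
 * device, so they surface as "link_control_flag" and "primary_flag"
 * files in the device's sysfs directory.
 */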

static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        struct efx_ef10_vlan *vlan;

        WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

        list_for_each_entry(vlan, &nic_data->vlan_list, list) {
                if (vlan->vid == vid)
                        return vlan;
        }

        return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        struct efx_ef10_vlan *vlan;
        int rc;

        mutex_lock(&nic_data->vlan_lock);

        vlan = efx_ef10_find_vlan(efx, vid);
        if (vlan) {
                /* We add VID 0 on init. 8021q adds it on module init
                 * for all interfaces with the VLAN filtering feature.
                 */
                if (vid == 0)
                        goto done_unlock;
                netif_warn(efx, drv, efx->net_dev,
                           "VLAN %u already added\n", vid);
                rc = -EALREADY;
                goto fail_exist;
        }

        rc = -ENOMEM;
        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
        if (!vlan)
                goto fail_alloc;

        vlan->vid = vid;

        list_add_tail(&vlan->list, &nic_data->vlan_list);

        if (efx->filter_state) {
                mutex_lock(&efx->mac_lock);
                down_write(&efx->filter_sem);
                rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
                up_write(&efx->filter_sem);
                mutex_unlock(&efx->mac_lock);
                if (rc)
                        goto fail_filter_add_vlan;
        }

done_unlock:
        mutex_unlock(&nic_data->vlan_lock);
        return 0;

fail_filter_add_vlan:
        list_del(&vlan->list);
        kfree(vlan);
fail_alloc:
fail_exist:
        mutex_unlock(&nic_data->vlan_lock);
        return rc;
}
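
/* Lock order used above when the filter table must be updated:
 * nic_data->vlan_lock, then efx->mac_lock, then efx->filter_sem.
 * efx_ef10_del_vlan_internal() below takes filter_sem under vlan_lock
 * without mac_lock.
 */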

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
                                       struct efx_ef10_vlan *vlan)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;

        WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

        if (efx->filter_state) {
                down_write(&efx->filter_sem);
                efx_mcdi_filter_del_vlan(efx, vlan->vid);
                up_write(&efx->filter_sem);
        }

        list_del(&vlan->list);
        kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        struct efx_ef10_vlan *vlan;
        int rc = 0;

        /* 8021q removes VID 0 on module unload for all interfaces
         * with the VLAN filtering feature. We need to keep it to
         * receive untagged traffic.
         */
        if (vid == 0)
                return 0;

        mutex_lock(&nic_data->vlan_lock);

        vlan = efx_ef10_find_vlan(efx, vid);
        if (!vlan) {
                netif_err(efx, drv, efx->net_dev,
                          "VLAN %u to be deleted not found\n", vid);
                rc = -ENOENT;
        } else {
                efx_ef10_del_vlan_internal(efx, vlan);
        }

        mutex_unlock(&nic_data->vlan_lock);

        return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        struct efx_ef10_vlan *vlan, *next_vlan;

        mutex_lock(&nic_data->vlan_lock);
        list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
                efx_ef10_del_vlan_internal(efx, vlan);
        mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR_RO(link_control_flag);
static DEVICE_ATTR_RO(primary_flag);

static int efx_ef10_probe(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data;
        int i, rc;

        nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
        if (!nic_data)
                return -ENOMEM;
        efx->nic_data = nic_data;

        /* we assume later that we can copy from this buffer in dwords */
        BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

        rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
                                  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
        if (rc)
                goto fail1;

        /* Get the MC's warm boot count.  In case it's rebooting right
         * now, be prepared to retry.
         */
        i = 0;
        for (;;) {
                rc = efx_ef10_get_warm_boot_count(efx);
                if (rc >= 0)
                        break;
                if (++i == 5)
                        goto fail2;
                ssleep(1);
        }
        nic_data->warm_boot_count = rc;

        /* In case we're recovering from a crash (kexec), we want to
         * cancel any outstanding request by the previous user of this
         * function.  We send a special message using the least
         * significant bits of the 'high' (doorbell) register.
         */
        _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

        rc = efx_mcdi_init(efx);
        if (rc)
                goto fail2;

        mutex_init(&nic_data->udp_tunnels_lock);
        for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
                nic_data->udp_tunnels[i].type =
                        TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID;

        /* Reset (most) configuration for this function */
        rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
        if (rc)
                goto fail3;

        /* Enable event logging */
        rc = efx_mcdi_log_ctrl(efx, true, false, 0);
        if (rc)
                goto fail3;

        rc = device_create_file(&efx->pci_dev->dev,
                                &dev_attr_link_control_flag);
        if (rc)
                goto fail3;

        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
        if (rc)
                goto fail4;

        rc = efx_get_pf_index(efx, &nic_data->pf_index);
        if (rc)
                goto fail5;

        rc = efx_ef10_init_datapath_caps(efx);
        if (rc < 0)
                goto fail5;

        efx_ef10_read_licensed_features(efx);

        /* We can have one VI for each vi_stride-byte region.
         * However, until we use TX option descriptors we need up to four
         * TX queues per channel for different checksumming combinations.
         */
        if (nic_data->datapath_caps &
            (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
                efx->tx_queues_per_channel = 4;
        else
                efx->tx_queues_per_channel = 2;
        efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride;
        if (!efx->max_vis) {
                netif_err(efx, drv, efx->net_dev, "error determining max VIs\n");
                rc = -EIO;
                goto fail5;
        }
        efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS,
                                  efx->max_vis / efx->tx_queues_per_channel);
        efx->max_tx_channels = efx->max_channels;
        if (WARN_ON(efx->max_channels == 0)) {
                rc = -EIO;
                goto fail5;
        }

        efx->rx_packet_len_offset =
                ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

        if (nic_data->datapath_caps &
            (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
                efx->net_dev->hw_features |= NETIF_F_RXFCS;

        rc = efx_mcdi_port_get_number(efx);
        if (rc < 0)
                goto fail5;
        efx->port_num = rc;

        rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
        if (rc)
                goto fail5;

        rc = efx_ef10_get_timer_config(efx);
        if (rc < 0)
                goto fail5;

        rc = efx_mcdi_mon_probe(efx);
        if (rc && rc != -EPERM)
                goto fail5;

        efx_ptp_defer_probe_with_channel(efx);

#ifdef CONFIG_SFC_SRIOV
        if (efx->pci_dev->physfn && !efx->pci_dev->is_physfn) {
                struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
                struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

                efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
        } else
#endif
                ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

        INIT_LIST_HEAD(&nic_data->vlan_list);
        mutex_init(&nic_data->vlan_lock);

        /* Add unspecified VID to support VLAN filtering being disabled */
        rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
        if (rc)
                goto fail_add_vid_unspec;

        /* If VLAN filtering is enabled, we need VID 0 to get untagged
         * traffic.  It is added automatically if the 8021q module is
         * loaded, but we can't rely on that since the module may not
         * be loaded.
         */
        rc = efx_ef10_add_vlan(efx, 0);
        if (rc)
                goto fail_add_vid_0;

        if (nic_data->datapath_caps &
            (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) &&
            efx->mcdi->fn_flags &
            (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED))
                efx->net_dev->udp_tunnel_nic_info = &efx_ef10_udp_tunnels;

        return 0;

fail_add_vid_0:
        efx_ef10_cleanup_vlans(efx);
fail_add_vid_unspec:
        mutex_destroy(&nic_data->vlan_lock);
        efx_ptp_remove(efx);
        efx_mcdi_mon_remove(efx);
fail5:
        device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
        device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
        efx_mcdi_detach(efx);

        mutex_lock(&nic_data->udp_tunnels_lock);
        memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
        (void)efx_ef10_set_udp_tnl_ports(efx, true);
        mutex_unlock(&nic_data->udp_tunnels_lock);
        mutex_destroy(&nic_data->udp_tunnels_lock);

        efx_mcdi_fini(efx);
fail2:
        efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
        kfree(nic_data);
        efx->nic_data = NULL;
        return rc;
}

#ifdef EFX_USE_PIO

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
        unsigned int i;
        int rc;

        BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);

        for (i = 0; i < nic_data->n_piobufs; i++) {
                MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
                               nic_data->piobuf_handle[i]);
                rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
                                  NULL, 0, NULL);
                WARN_ON(rc);
        }

        nic_data->n_piobufs = 0;
}

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
        unsigned int i;
        size_t outlen;
        int rc = 0;

        BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);

        for (i = 0; i < n; i++) {
                rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
                                        outbuf, sizeof(outbuf), &outlen);
                if (rc) {
                        /* Don't display the MC error if we didn't have space
                         * for a VF.
                         */
                        if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC))
                                efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF,
                                                       0, outbuf, outlen, rc);
                        break;
                }
                if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
                        rc = -EIO;
                        break;
                }
                nic_data->piobuf_handle[i] =
                        MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
                netif_dbg(efx, probe, efx->net_dev,
                          "allocated PIO buffer %u handle %x\n", i,
                          nic_data->piobuf_handle[i]);
        }

        nic_data->n_piobufs = i;
        if (rc)
                efx_ef10_free_piobufs(efx);
        return rc;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN);
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        unsigned int offset, index;
        int rc;

        BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
        BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);

        /* Link a buffer to each VI in the write-combining mapping */
        for (index = 0; index < nic_data->n_piobufs; ++index) {
                MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
                               nic_data->piobuf_handle[index]);
                MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
                               nic_data->pio_write_vi_base + index);
                rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
                                  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
                                  NULL, 0, NULL);
                if (rc) {
                        netif_err(efx, drv, efx->net_dev,
                                  "failed to link VI %u to PIO buffer %u (%d)\n",
                                  nic_data->pio_write_vi_base + index, index,
                                  rc);
                        goto fail;
                }
                netif_dbg(efx, probe, efx->net_dev,
                          "linked VI %u to PIO buffer %u\n",
                          nic_data->pio_write_vi_base + index, index);
        }

        /* Link a buffer to each TX queue */
        efx_for_each_channel(channel, efx) {
                /* Extra channels, even those with TXQs (PTP), do not require
                 * PIO resources.
                 */
                if (!channel->type->want_pio ||
                    channel->channel >= efx->xdp_channel_offset)
                        continue;

                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        /* We assign the PIO buffers to queues in
                         * reverse order to allow for the following
                         * special case.
                         */
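                        /* Worked example (figures assumed, not from
                         * this file): with eight TX channels,
                         * efx_piobuf_size = 256 and piobuf_size =
                         * 2048, channel 0 maps to offset
                         * (8 - 0 - 1) * 256 = 1792 within piobuf 0
                         * and channel 7 to offset 0, i.e. the queues
                         * fill the buffer from the top down.
                         */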
                        offset = ((efx->tx_channel_offset + efx->n_tx_channels -
                                   tx_queue->channel->channel - 1) *
                                  efx_piobuf_size);
                        index = offset / nic_data->piobuf_size;
                        offset = offset % nic_data->piobuf_size;

                        /* When the host page size is 4K, the first
                         * host page in the WC mapping may be within
                         * the same VI page as the last TX queue.  We
                         * can only link one buffer to each VI.
                         */
                        if (tx_queue->queue == nic_data->pio_write_vi_base) {
                                BUG_ON(index != 0);
                                rc = 0;
                        } else {
                                MCDI_SET_DWORD(inbuf,
                                               LINK_PIOBUF_IN_PIOBUF_HANDLE,
                                               nic_data->piobuf_handle[index]);
                                MCDI_SET_DWORD(inbuf,
                                               LINK_PIOBUF_IN_TXQ_INSTANCE,
                                               tx_queue->queue);
                                rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
                                                  inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
                                                  NULL, 0, NULL);
                        }

                        if (rc) {
                                /* This is non-fatal; the TX path just
                                 * won't use PIO for this queue
                                 */
                                netif_err(efx, drv, efx->net_dev,
                                          "failed to link VI %u to PIO buffer %u (%d)\n",
                                          tx_queue->queue, index, rc);
                                tx_queue->piobuf = NULL;
                        } else {
                                tx_queue->piobuf =
                                        nic_data->pio_write_base +
                                        index * efx->vi_stride + offset;
                                tx_queue->piobuf_offset = offset;
                                netif_dbg(efx, probe, efx->net_dev,
                                          "linked VI %u to PIO buffer %u offset %x addr %p\n",
                                          tx_queue->queue, index,
                                          tx_queue->piobuf_offset,
                                          tx_queue->piobuf);
                        }
                }
        }

        return 0;

fail:
        /* inbuf was defined for MC_CMD_LINK_PIOBUF.  We can use the same
         * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter.
         */
        BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN);
        while (index--) {
                MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
                               nic_data->pio_write_vi_base + index);
                efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
                             inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
                             NULL, 0, NULL);
        }
        return rc;
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;

        /* All our existing PIO buffers went away */
        efx_for_each_channel(channel, efx)
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        tx_queue->piobuf = NULL;
}

#else /* !EFX_USE_PIO */

static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
{
        return n == 0 ? 0 : -ENOBUFS;
}

static int efx_ef10_link_piobufs(struct efx_nic *efx)
{
        return 0;
}

static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}

static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
{
}

#endif /* EFX_USE_PIO */

static void efx_ef10_remove(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        int rc;

#ifdef CONFIG_SFC_SRIOV
        struct efx_ef10_nic_data *nic_data_pf;
        struct pci_dev *pci_dev_pf;
        struct efx_nic *efx_pf;
        struct ef10_vf *vf;

        if (efx->pci_dev->is_virtfn) {
                pci_dev_pf = efx->pci_dev->physfn;
                if (pci_dev_pf) {
                        efx_pf = pci_get_drvdata(pci_dev_pf);
                        nic_data_pf = efx_pf->nic_data;
                        vf = nic_data_pf->vf + nic_data->vf_index;
                        vf->efx = NULL;
                } else
                        netif_info(efx, drv, efx->net_dev,
                                   "Could not get the PF id from VF\n");
        }
#endif

        efx_ef10_cleanup_vlans(efx);
        mutex_destroy(&nic_data->vlan_lock);

        efx_ptp_remove(efx);

        efx_mcdi_mon_remove(efx);

        efx_mcdi_rx_free_indir_table(efx);

        if (nic_data->wc_membase)
                iounmap(nic_data->wc_membase);

        rc = efx_mcdi_free_vis(efx);
        WARN_ON(rc != 0);

        if (!nic_data->must_restore_piobufs)
                efx_ef10_free_piobufs(efx);

        device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
        device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);

        efx_mcdi_detach(efx);

        memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
        mutex_lock(&nic_data->udp_tunnels_lock);
        (void)efx_ef10_set_udp_tnl_ports(efx, true);
        mutex_unlock(&nic_data->udp_tunnels_lock);

        mutex_destroy(&nic_data->udp_tunnels_lock);

        efx_mcdi_fini(efx);
        efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
        kfree(nic_data);
}

static int efx_ef10_probe_pf(struct efx_nic *efx)
{
        return efx_ef10_probe(efx);
}

int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
                            u32 *port_flags, u32 *vadaptor_flags,
                            unsigned int *vlan_tags)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
        size_t outlen;
        int rc;

        if (nic_data->datapath_caps &
            (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
                MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
                               port_id);

                rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
                                  outbuf, sizeof(outbuf), &outlen);
                if (rc)
                        return rc;

                if (outlen < sizeof(outbuf)) {
                        rc = -EIO;
                        return rc;
                }
        }

        if (port_flags)
                *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
        if (vadaptor_flags)
                *vadaptor_flags =
                        MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
        if (vlan_tags)
                *vlan_tags =
                        MCDI_DWORD(outbuf,
                                   VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);

        return 0;
}
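
/* Note (assuming MCDI_DECLARE_BUF() zero-initialises its buffer, as in
 * mcdi.h): when the VADAPTOR_QUERY capability is absent, the RPC above is
 * skipped and the flags and VLAN-tag count are reported as zero rather
 * than the call failing.
 */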

int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

        MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
        return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}

int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);

        MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
        return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}

int efx_ef10_vport_add_mac(struct efx_nic *efx,
                           unsigned int port_id, u8 *mac)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);

        MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
        ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);

        return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
                            sizeof(inbuf), NULL, 0, NULL);
}

int efx_ef10_vport_del_mac(struct efx_nic *efx,
                           unsigned int port_id, u8 *mac)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);

        MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
        ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);

        return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
                            sizeof(inbuf), NULL, 0, NULL);
}

#ifdef CONFIG_SFC_SRIOV
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
        int rc;
        struct pci_dev *pci_dev_pf;

        /* If the parent PF has no VF data structure, it doesn't know about this
         * VF so fail probe.  The VF needs to be re-created.  This can happen
         * if the PF driver was unloaded while any VF was assigned to a guest
         * (using Xen only).
         */
        pci_dev_pf = efx->pci_dev->physfn;
        if (pci_dev_pf) {
                struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
                struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;

                if (!nic_data_pf->vf) {
                        netif_info(efx, drv, efx->net_dev,
                                   "The VF cannot link to its parent PF; "
                                   "please destroy and re-create the VF\n");
                        return -EBUSY;
                }
        }

        rc = efx_ef10_probe(efx);
        if (rc)
                return rc;

        rc = efx_ef10_get_vf_index(efx);
        if (rc)
                goto fail;

        if (efx->pci_dev->is_virtfn) {
                if (efx->pci_dev->physfn) {
                        struct efx_nic *efx_pf =
                                pci_get_drvdata(efx->pci_dev->physfn);
                        struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
                        struct efx_ef10_nic_data *nic_data = efx->nic_data;

                        nic_data_p->vf[nic_data->vf_index].efx = efx;
                        nic_data_p->vf[nic_data->vf_index].pci_dev =
                                efx->pci_dev;
                } else
                        netif_info(efx, drv, efx->net_dev,
                                   "Could not get the PF id from VF\n");
        }

        return 0;

fail:
        efx_ef10_remove(efx);
        return rc;
}
#else
static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
{
        return 0;
}
#endif

static int efx_ef10_alloc_vis(struct efx_nic *efx,
                              unsigned int min_vis, unsigned int max_vis)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;

        return efx_mcdi_alloc_vis(efx, min_vis, max_vis, &nic_data->vi_base,
                                  &nic_data->n_allocated_vis);
}

/* Note that the failure path of this function does not free
 * resources, as this will be done by efx_ef10_remove().
 */
static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
        unsigned int min_vis = max_t(unsigned int, efx->tx_queues_per_channel,
                                     efx_separate_tx_channels ? 2 : 1);
        unsigned int channel_vis, pio_write_vi_base, max_vis;
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        unsigned int uc_mem_map_size, wc_mem_map_size;
        void __iomem *membase;
        int rc;

        channel_vis = max(efx->n_channels,
                          ((efx->n_tx_channels + efx->n_extra_tx_channels) *
                           efx->tx_queues_per_channel) +
                           efx->n_xdp_channels * efx->xdp_tx_per_channel);
        if (efx->max_vis && efx->max_vis < channel_vis) {
                netif_dbg(efx, drv, efx->net_dev,
                          "Reducing channel VIs from %u to %u\n",
                          channel_vis, efx->max_vis);
                channel_vis = efx->max_vis;
        }

#ifdef EFX_USE_PIO
        /* Try to allocate PIO buffers if wanted and if the full
         * number of PIO buffers would be sufficient to allocate one
         * copy-buffer per TX channel.  Failure is non-fatal, as there
         * are only a small number of PIO buffers shared between all
         * functions of the controller.
         */
        if (efx_piobuf_size != 0 &&
            nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
            efx->n_tx_channels) {
                unsigned int n_piobufs =
                        DIV_ROUND_UP(efx->n_tx_channels,
                                     nic_data->piobuf_size / efx_piobuf_size);

                rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
                if (rc == -ENOSPC)
                        netif_dbg(efx, probe, efx->net_dev,
                                  "out of PIO buffers; cannot allocate more\n");
                else if (rc == -EPERM)
                        netif_dbg(efx, probe, efx->net_dev,
                                  "not permitted to allocate PIO buffers\n");
                else if (rc)
                        netif_err(efx, probe, efx->net_dev,
                                  "failed to allocate PIO buffers (%d)\n", rc);
                else
                        netif_dbg(efx, probe, efx->net_dev,
                                  "allocated %u PIO buffers\n", n_piobufs);
        }
#else
        nic_data->n_piobufs = 0;
#endif

        /* PIO buffers should be mapped with write-combining enabled,
         * and we want to make single UC and WC mappings rather than
         * several of each (in fact that's the only option if host
         * page size is >4K).  So we may allocate some extra VIs just
         * for writing PIO buffers through.
         *
         * The UC mapping contains (channel_vis - 1) complete VIs and the
         * first 4K of the next VI.  Then the WC mapping begins with
         * the remainder of this last VI.
         */
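        /* Worked example (figures assumed): with 4K pages, an 8K vi_stride
         * and channel_vis = 10, uc_mem_map_size = PAGE_ALIGN(9 * 8192 +
         * ER_DZ_TX_PIOBUF) = 77824 bytes (nine full VIs plus one 4K page,
         * assuming ER_DZ_TX_PIOBUF lands within the first 4K page), so
         * pio_write_vi_base = 77824 / 8192 rounds down to 9 and the WC
         * mapping picks up midway through VI 9.
         */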
        uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride +
                                     ER_DZ_TX_PIOBUF);
        if (nic_data->n_piobufs) {
                /* pio_write_vi_base rounds down to give the number of complete
                 * VIs inside the UC mapping.
                 */
                pio_write_vi_base = uc_mem_map_size / efx->vi_stride;
                wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
                                               nic_data->n_piobufs) *
                                              efx->vi_stride) -
                                   uc_mem_map_size);
                max_vis = pio_write_vi_base + nic_data->n_piobufs;
        } else {
                pio_write_vi_base = 0;
                wc_mem_map_size = 0;
                max_vis = channel_vis;
        }

        /* In case the last attached driver failed to free VIs, do it now */
        rc = efx_mcdi_free_vis(efx);
        if (rc != 0)
                return rc;

        rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
        if (rc != 0)
                return rc;

        if (nic_data->n_allocated_vis < channel_vis) {
                netif_info(efx, drv, efx->net_dev,
                           "Could not allocate enough VIs to satisfy RSS"
                           " requirements. Performance may not be optimal.\n");
                /* We didn't get the VIs to populate our channels.
                 * We could keep what we got but then we'd have more
                 * interrupts than we need.
                 * Instead calculate new max_channels and restart
                 */
                efx->max_channels = nic_data->n_allocated_vis;
                efx->max_tx_channels =
                        nic_data->n_allocated_vis / efx->tx_queues_per_channel;

                efx_mcdi_free_vis(efx);
                return -EAGAIN;
        }

        /* If we didn't get enough VIs to map all the PIO buffers, free the
         * PIO buffers
         */
        if (nic_data->n_piobufs &&
            nic_data->n_allocated_vis <
            pio_write_vi_base + nic_data->n_piobufs) {
                netif_dbg(efx, probe, efx->net_dev,
                          "%u VIs are not sufficient to map %u PIO buffers\n",
                          nic_data->n_allocated_vis, nic_data->n_piobufs);
                efx_ef10_free_piobufs(efx);
        }

        /* Shrink the original UC mapping of the memory BAR */
        membase = ioremap(efx->membase_phys, uc_mem_map_size);
        if (!membase) {
                netif_err(efx, probe, efx->net_dev,
                          "could not shrink memory BAR to %x\n",
                          uc_mem_map_size);
                return -ENOMEM;
        }
        iounmap(efx->membase);
        efx->membase = membase;

        /* Set up the WC mapping if needed */
        if (wc_mem_map_size) {
                nic_data->wc_membase = ioremap_wc(efx->membase_phys +
                                                  uc_mem_map_size,
                                                  wc_mem_map_size);
                if (!nic_data->wc_membase) {
                        netif_err(efx, probe, efx->net_dev,
                                  "could not allocate WC mapping of size %x\n",
                                  wc_mem_map_size);
                        return -ENOMEM;
                }
                nic_data->pio_write_vi_base = pio_write_vi_base;
                nic_data->pio_write_base =
                        nic_data->wc_membase +
                        (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF -
                         uc_mem_map_size);

                rc = efx_ef10_link_piobufs(efx);
                if (rc)
                        efx_ef10_free_piobufs(efx);
        }

        netif_dbg(efx, probe, efx->net_dev,
                  "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
                  &efx->membase_phys, efx->membase, uc_mem_map_size,
                  nic_data->wc_membase, wc_mem_map_size);

        return 0;
}

static void efx_ef10_fini_nic(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;

        kfree(nic_data->mc_stats);
        nic_data->mc_stats = NULL;
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        netdev_features_t hw_enc_features = 0;
        int rc;

        if (nic_data->must_check_datapath_caps) {
                rc = efx_ef10_init_datapath_caps(efx);
                if (rc)
                        return rc;
                nic_data->must_check_datapath_caps = false;
        }

        if (efx->must_realloc_vis) {
                /* We cannot let the number of VIs change now */
                rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
                                        nic_data->n_allocated_vis);
                if (rc)
                        return rc;
                efx->must_realloc_vis = false;
        }

        nic_data->mc_stats = kmalloc(efx->num_mac_stats * sizeof(__le64),
                                     GFP_KERNEL);
        if (!nic_data->mc_stats)
                return -ENOMEM;

        if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
                rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
                if (rc == 0) {
                        rc = efx_ef10_link_piobufs(efx);
                        if (rc)
                                efx_ef10_free_piobufs(efx);
                }

                /* Log an error on failure, but this is non-fatal.
                 * Permission errors are less important - we've presumably
                 * had the PIO buffer licence removed.
                 */
                if (rc == -EPERM)
                        netif_dbg(efx, drv, efx->net_dev,
                                  "not permitted to restore PIO buffers\n");
                else if (rc)
                        netif_err(efx, drv, efx->net_dev,
                                  "failed to restore PIO buffers (%d)\n", rc);
                nic_data->must_restore_piobufs = false;
        }

        /* add encapsulated checksum offload features */
        if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
                hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        /* add encapsulated TSO features */
        if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
                netdev_features_t encap_tso_features;

                encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
                        NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;

                hw_enc_features |= encap_tso_features | NETIF_F_TSO;
                efx->net_dev->features |= encap_tso_features;
        }
        efx->net_dev->hw_enc_features = hw_enc_features;

        /* don't fail init if RSS setup doesn't work */
        rc = efx->type->rx_push_rss_config(efx, false,
                                           efx->rss_context.rx_indir_table, NULL);

        return 0;
}

static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
#ifdef CONFIG_SFC_SRIOV
        unsigned int i;
#endif

        /* All our allocations have been reset */
        efx->must_realloc_vis = true;
        efx_mcdi_filter_table_reset_mc_allocations(efx);
        nic_data->must_restore_piobufs = true;
        efx_ef10_forget_old_piobufs(efx);
        efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;

        /* Driver-created vswitches and vports must be re-created */
        nic_data->must_probe_vswitching = true;
        efx->vport_id = EVB_PORT_ID_ASSIGNED;
#ifdef CONFIG_SFC_SRIOV
        if (nic_data->vf)
                for (i = 0; i < efx->vf_count; i++)
                        nic_data->vf[i].vport_id = 0;
#endif
}

static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
{
        if (reason == RESET_TYPE_MC_FAILURE)
                return RESET_TYPE_DATAPATH;

        return efx_mcdi_map_reset_reason(reason);
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
        enum {
                EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
                                   ETH_RESET_SHARED_SHIFT),
                EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
                                  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
                                  ETH_RESET_PHY | ETH_RESET_MGMT) <<
                                 ETH_RESET_SHARED_SHIFT)
        };

        /* We assume for now that our PCI function is permitted to
         * reset everything.
         */

        if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
                *flags &= ~EF10_RESET_MC;
                return RESET_TYPE_WORLD;
        }

        if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
                *flags &= ~EF10_RESET_PORT;
                return RESET_TYPE_ALL;
        }

        /* no invisible reset implemented */

        return -EINVAL;
}
1435
1436static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
1437{
1438        int rc = efx_mcdi_reset(efx, reset_type);
1439
1440        /* Unprivileged functions return -EPERM, but need to return success
1441         * here so that the datapath is brought back up.
1442         */
1443        if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
1444                rc = 0;
1445
1446        /* If it was a port reset, trigger reallocation of MC resources.
1447         * Note that on an MC reset nothing needs to be done now because we'll
1448         * detect the MC reset later and handle it then.
1449         * For an FLR, we never get an MC reset event, but the MC has reset all
1450         * resources assigned to us, so we have to trigger reallocation now.
1451         */
1452        if ((reset_type == RESET_TYPE_ALL ||
1453             reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
1454                efx_ef10_table_reset_mc_allocations(efx);
1455        return rc;
1456}
1457
1458#define EF10_DMA_STAT(ext_name, mcdi_name)                      \
1459        [EF10_STAT_ ## ext_name] =                              \
1460        { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1461#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)                \
1462        [EF10_STAT_ ## int_name] =                              \
1463        { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1464#define EF10_OTHER_STAT(ext_name)                               \
1465        [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
1466
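    /* Illustrative expansion of the macros above (a sketch, not extra code):
     *   EF10_DMA_STAT(port_tx_bytes, TX_BYTES)
     * becomes
     *   [EF10_STAT_port_tx_bytes] =
     *           { "port_tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES },
     * i.e. a named 64-bit counter at byte offset 8 * MC_CMD_MAC_TX_BYTES in
     * the DMA'd statistics buffer.  EF10_DMA_INVIS_STAT() entries have a
     * NULL name and so are maintained internally but never reported by name.
     */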
1467static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
1468        EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
1469        EF10_DMA_STAT(port_tx_packets, TX_PKTS),
1470        EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
1471        EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
1472        EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
1473        EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
1474        EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
1475        EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
1476        EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
1477        EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
1478        EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
1479        EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
1480        EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
1481        EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
1482        EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
1483        EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
1484        EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
1485        EF10_OTHER_STAT(port_rx_good_bytes),
1486        EF10_OTHER_STAT(port_rx_bad_bytes),
1487        EF10_DMA_STAT(port_rx_packets, RX_PKTS),
1488        EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
1489        EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
1490        EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
1491        EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
1492        EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
1493        EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
1494        EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
1495        EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
1496        EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
1497        EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
1498        EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
1499        EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
1500        EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
1501        EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
1502        EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
1503        EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
1504        EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
1505        EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
1506        EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
1507        EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
1508        EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
1509        EFX_GENERIC_SW_STAT(rx_nodesc_trunc),
1510        EFX_GENERIC_SW_STAT(rx_noskb_drops),
1511        EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
1512        EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
1513        EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
1514        EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
1515        EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
1516        EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
1517        EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
1518        EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
1519        EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
1520        EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
1521        EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
1522        EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
1523        EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
1524        EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
1525        EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
1526        EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
1527        EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
1528        EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
1529        EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
1530        EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
1531        EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
1532        EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
1533        EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
1534        EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
1535        EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
1536        EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
1537        EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
1538        EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
1539        EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
1540        EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
1541        EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS),
1542        EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS),
1543        EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0),
1544        EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1),
1545        EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2),
1546        EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3),
1547        EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK),
1548        EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS),
1549        EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL),
1550        EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL),
1551        EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL),
1552        EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL),
1553        EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL),
1554        EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL),
1555        EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL),
1556        EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK),
1557        EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK),
1558        EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK),
1559        EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS),
1560        EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK),
1561        EF10_DMA_STAT(ctpio_poison, CTPIO_POISON),
1562        EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE),
1563};
1564
1565#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |      \
1566                               (1ULL << EF10_STAT_port_tx_packets) |    \
1567                               (1ULL << EF10_STAT_port_tx_pause) |      \
1568                               (1ULL << EF10_STAT_port_tx_unicast) |    \
1569                               (1ULL << EF10_STAT_port_tx_multicast) |  \
1570                               (1ULL << EF10_STAT_port_tx_broadcast) |  \
1571                               (1ULL << EF10_STAT_port_rx_bytes) |      \
1572                               (1ULL <<                                 \
1573                                EF10_STAT_port_rx_bytes_minus_good_bytes) | \
1574                               (1ULL << EF10_STAT_port_rx_good_bytes) | \
1575                               (1ULL << EF10_STAT_port_rx_bad_bytes) |  \
1576                               (1ULL << EF10_STAT_port_rx_packets) |    \
1577                               (1ULL << EF10_STAT_port_rx_good) |       \
1578                               (1ULL << EF10_STAT_port_rx_bad) |        \
1579                               (1ULL << EF10_STAT_port_rx_pause) |      \
1580                               (1ULL << EF10_STAT_port_rx_control) |    \
1581                               (1ULL << EF10_STAT_port_rx_unicast) |    \
1582                               (1ULL << EF10_STAT_port_rx_multicast) |  \
1583                               (1ULL << EF10_STAT_port_rx_broadcast) |  \
1584                               (1ULL << EF10_STAT_port_rx_lt64) |       \
1585                               (1ULL << EF10_STAT_port_rx_64) |         \
1586                               (1ULL << EF10_STAT_port_rx_65_to_127) |  \
1587                               (1ULL << EF10_STAT_port_rx_128_to_255) | \
1588                               (1ULL << EF10_STAT_port_rx_256_to_511) | \
1589                               (1ULL << EF10_STAT_port_rx_512_to_1023) |\
1590                               (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
1591                               (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
1592                               (1ULL << EF10_STAT_port_rx_gtjumbo) |    \
1593                               (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
1594                               (1ULL << EF10_STAT_port_rx_overflow) |   \
1595                               (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
1596                               (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
1597                               (1ULL << GENERIC_STAT_rx_noskb_drops))
1598
1599/* On 7000 series NICs, these statistics are only provided by the 10G MAC.
1600 * For a 10G/40G switchable port we do not expose these because they might
1601 * not include all the packets they should.
1602 * On 8000 series NICs these statistics are always provided.
1603 */
1604#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) |  \
1605                                 (1ULL << EF10_STAT_port_tx_lt64) |     \
1606                                 (1ULL << EF10_STAT_port_tx_64) |       \
1607                                 (1ULL << EF10_STAT_port_tx_65_to_127) |\
1608                                 (1ULL << EF10_STAT_port_tx_128_to_255) |\
1609                                 (1ULL << EF10_STAT_port_tx_256_to_511) |\
1610                                 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
1611                                 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
1612                                 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
1613
1614/* These statistics are only provided by the 40G MAC.  For a 10G/40G
1615 * switchable port we do expose these because the errors will otherwise
1616 * be silent.
1617 */
1618#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
1619                                  (1ULL << EF10_STAT_port_rx_length_error))
1620
1621/* These statistics are only provided if the firmware supports the
1622 * capability PM_AND_RXDP_COUNTERS.
1623 */
1624#define HUNT_PM_AND_RXDP_STAT_MASK (                                    \
1625        (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |              \
1626        (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |            \
1627        (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |               \
1628        (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |             \
1629        (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |                      \
1630        (1ULL << EF10_STAT_port_rx_pm_discard_qbb) |                    \
1631        (1ULL << EF10_STAT_port_rx_pm_discard_mapping) |                \
1632        (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |             \
1633        (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |             \
1634        (1ULL << EF10_STAT_port_rx_dp_streaming_packets) |              \
1635        (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |                      \
1636        (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
1637
1638/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2,
1639 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in
1640 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1641 * These bits are in the second u64 of the raw mask.
1642 */
1643#define EF10_FEC_STAT_MASK (                                            \
1644        (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) |             \
1645        (1ULL << (EF10_STAT_fec_corrected_errors - 64)) |               \
1646        (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) |        \
1647        (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) |        \
1648        (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) |        \
1649        (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64)))
1650
1651/* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3,
1652 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in
1653 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS.
1654 * These bits are in the second u64 of the raw mask.
1655 */
1656#define EF10_CTPIO_STAT_MASK (                                          \
1657        (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) |             \
1658        (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) |           \
1659        (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) |           \
1660        (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) |                \
1661        (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) |               \
1662        (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) |                 \
1663        (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) |            \
1664        (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) |             \
1665        (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) |              \
1666        (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) |          \
1667        (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) |         \
1668        (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) |                \
1669        (1ULL << (EF10_STAT_ctpio_success - 64)) |                      \
1670        (1ULL << (EF10_STAT_ctpio_fallback - 64)) |                     \
1671        (1ULL << (EF10_STAT_ctpio_poison - 64)) |                       \
1672        (1ULL << (EF10_STAT_ctpio_erase - 64)))
1673
1674static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
1675{
1676        u64 raw_mask = HUNT_COMMON_STAT_MASK;
1677        u32 port_caps = efx_mcdi_phy_get_caps(efx);
1678        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1679
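            /* Port-level MAC statistics belong to the link-control
             * function; every other function gets an empty mask here.
             */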
1680        if (!(efx->mcdi->fn_flags &
1681              1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
1682                return 0;
1683
1684        if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
1685                raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
1686                /* 8000 series have everything even at 40G */
1687                if (nic_data->datapath_caps2 &
1688                    (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN))
1689                        raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1690        } else {
1691                raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1692        }
1693
1694        if (nic_data->datapath_caps &
1695            (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
1696                raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
1697
1698        return raw_mask;
1699}
1700
1701static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
1702{
1703        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1704        u64 raw_mask[2];
1705
1706        raw_mask[0] = efx_ef10_raw_stat_mask(efx);
1707
1708        /* Only show vadaptor stats when EVB capability is present */
1709        if (nic_data->datapath_caps &
1710            (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
1711                raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
1712                raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1;
1713        } else {
1714                raw_mask[1] = 0;
1715        }
1716        /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */
1717        if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2)
1718                raw_mask[1] |= EF10_FEC_STAT_MASK;
1719
1720        /* CTPIO stats appear in V3. Only show them on devices that actually
1721         * support CTPIO. Although this driver doesn't use CTPIO others might,
1722         * and we may be reporting the stats for the underlying port.
1723         */
1724        if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 &&
1725            (nic_data->datapath_caps2 &
1726             (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN)))
1727                raw_mask[1] |= EF10_CTPIO_STAT_MASK;
1728
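            /* Fold the two u64 raw masks into the unsigned-long bitmap
             * layout that DECLARE_BITMAP()/for_each_set_bit() expect; the
             * BUILD_BUG_ONs pin down how many longs that takes.
             */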
1729#if BITS_PER_LONG == 64
1730        BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
1731        mask[0] = raw_mask[0];
1732        mask[1] = raw_mask[1];
1733#else
1734        BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
1735        mask[0] = raw_mask[0] & 0xffffffff;
1736        mask[1] = raw_mask[0] >> 32;
1737        mask[2] = raw_mask[1] & 0xffffffff;
1738#endif
1739}
1740
1741static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
1742{
1743        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1744
1745        efx_ef10_get_stat_mask(efx, mask);
1746        return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
1747                                      mask, names);
1748}
1749
1750static void efx_ef10_get_fec_stats(struct efx_nic *efx,
1751                                   struct ethtool_fec_stats *fec_stats)
1752{
1753        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1754        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1755        u64 *stats = nic_data->stats;
1756
1757        efx_ef10_get_stat_mask(efx, mask);
1758        if (test_bit(EF10_STAT_fec_corrected_errors, mask))
1759                fec_stats->corrected_blocks.total =
1760                        stats[EF10_STAT_fec_corrected_errors];
1761        if (test_bit(EF10_STAT_fec_uncorrected_errors, mask))
1762                fec_stats->uncorrectable_blocks.total =
1763                        stats[EF10_STAT_fec_uncorrected_errors];
1764}
1765
1766static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
1767                                           struct rtnl_link_stats64 *core_stats)
1768{
1769        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1770        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1771        u64 *stats = nic_data->stats;
1772        size_t stats_count = 0, index;
1773
1774        efx_ef10_get_stat_mask(efx, mask);
1775
1776        if (full_stats) {
1777                for_each_set_bit(index, mask, EF10_STAT_COUNT) {
1778                        if (efx_ef10_stat_desc[index].name) {
1779                                *full_stats++ = stats[index];
1780                                ++stats_count;
1781                        }
1782                }
1783        }
1784
1785        if (!core_stats)
1786                return stats_count;
1787
1788        if (nic_data->datapath_caps &
1789                        1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
1790                /* Use vadaptor stats. */
1791                core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
1792                                         stats[EF10_STAT_rx_multicast] +
1793                                         stats[EF10_STAT_rx_broadcast];
1794                core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
1795                                         stats[EF10_STAT_tx_multicast] +
1796                                         stats[EF10_STAT_tx_broadcast];
1797                core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
1798                                       stats[EF10_STAT_rx_multicast_bytes] +
1799                                       stats[EF10_STAT_rx_broadcast_bytes];
1800                core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
1801                                       stats[EF10_STAT_tx_multicast_bytes] +
1802                                       stats[EF10_STAT_tx_broadcast_bytes];
1803                core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
1804                                         stats[GENERIC_STAT_rx_noskb_drops];
1805                core_stats->multicast = stats[EF10_STAT_rx_multicast];
1806                core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
1807                core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
1808                core_stats->rx_errors = core_stats->rx_crc_errors;
1809                core_stats->tx_errors = stats[EF10_STAT_tx_bad];
1810        } else {
1811                /* Use port stats. */
1812                core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
1813                core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
1814                core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
1815                core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
1816                core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
1817                                         stats[GENERIC_STAT_rx_nodesc_trunc] +
1818                                         stats[GENERIC_STAT_rx_noskb_drops];
1819                core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
1820                core_stats->rx_length_errors =
1821                                stats[EF10_STAT_port_rx_gtjumbo] +
1822                                stats[EF10_STAT_port_rx_length_error];
1823                core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
1824                core_stats->rx_frame_errors =
1825                                stats[EF10_STAT_port_rx_align_error];
1826                core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
1827                core_stats->rx_errors = (core_stats->rx_length_errors +
1828                                         core_stats->rx_crc_errors +
1829                                         core_stats->rx_frame_errors);
1830        }
1831
1832        return stats_count;
1833}
1834
1835static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
1836                                       struct rtnl_link_stats64 *core_stats)
1837{
1838        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1839        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1840        u64 *stats = nic_data->stats;
1841
1842        efx_ef10_get_stat_mask(efx, mask);
1843
1844        efx_nic_copy_stats(efx, nic_data->mc_stats);
1845        efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
1846                             mask, stats, nic_data->mc_stats, false);
1847
1848        /* Update derived statistics */
1849        efx_nic_fix_nodesc_drop_stat(efx,
1850                                     &stats[EF10_STAT_port_rx_nodesc_drops]);
1851        /* MC Firmware reads RX_BYTES and RX_GOOD_BYTES from the MAC.
1852         * It then calculates RX_BAD_BYTES and DMAs it to us with RX_BYTES.
1853         * We report these as port_rx_ stats. We are not given RX_GOOD_BYTES.
1854         * Here we calculate port_rx_good_bytes.
1855         */
1856        stats[EF10_STAT_port_rx_good_bytes] =
1857                stats[EF10_STAT_port_rx_bytes] -
1858                stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
1859
1860        /* The asynchronous reads used to calculate RX_BAD_BYTES in
1861         * MC Firmware are done such that we should not see an increase in
1862         * RX_BAD_BYTES when a good packet has arrived. Unfortunately this
1863         * does mean that the stat can decrease at times. Here we do not
1864         * update the stat unless it has increased or has gone to zero
1865         * (in the case of the NIC rebooting).
1866         * Please see Bug 33781 for a discussion of why things work this way.
1867         */
1868        efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
1869                             stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
1870        efx_update_sw_stats(efx, stats);
1871
1872        return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1873}
1874
1875static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
1876        __must_hold(&efx->stats_lock)
1877{
1878        MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
1879        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1880        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1881        __le64 generation_start, generation_end;
1882        u64 *stats = nic_data->stats;
1883        u32 dma_len = efx->num_mac_stats * sizeof(u64);
1884        struct efx_buffer stats_buf;
1885        __le64 *dma_stats;
1886        int rc;
1887
1888        spin_unlock_bh(&efx->stats_lock);
1889
1890        efx_ef10_get_stat_mask(efx, mask);
1891
1892        rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_KERNEL);
1893        if (rc) {
1894                spin_lock_bh(&efx->stats_lock);
1895                return rc;
1896        }
1897
1898        dma_stats = stats_buf.addr;
1899        dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
1900
1901        MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
1902        MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
1903                              MAC_STATS_IN_DMA, 1);
1904        MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
1905        MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1906
1907        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
1908                                NULL, 0, NULL);
1909        spin_lock_bh(&efx->stats_lock);
1910        if (rc) {
1911                /* Expect ENOENT if DMA queues have not been set up */
1912                if (rc != -ENOENT || atomic_read(&efx->active_queues))
1913                        efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
1914                                               sizeof(inbuf), NULL, 0, rc);
1915                goto out;
1916        }
1917
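            /* The MC maintains a generation count at each end of the DMA'd
             * statistics block; if the two counts differ, the block was
             * being rewritten underneath us, so give up with -EAGAIN.
             */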
1918        generation_end = dma_stats[efx->num_mac_stats - 1];
1919        if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
1920                WARN_ON_ONCE(1);
1921                goto out;
1922        }
1923        rmb();
1924        efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
1925                             stats, stats_buf.addr, false);
1926        rmb();
1927        generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1928        if (generation_end != generation_start) {
1929                rc = -EAGAIN;
1930                goto out;
1931        }
1932
1933        efx_update_sw_stats(efx, stats);
1934out:
1935        efx_nic_free_buffer(efx, &stats_buf);
1936        return rc;
1937}
1938
1939static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
1940                                       struct rtnl_link_stats64 *core_stats)
1941{
1942        if (efx_ef10_try_update_nic_stats_vf(efx))
1943                return 0;
1944
1945        return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1946}
1947
1948static size_t efx_ef10_update_stats_atomic_vf(struct efx_nic *efx, u64 *full_stats,
1949                                              struct rtnl_link_stats64 *core_stats)
1950{
1951        struct efx_ef10_nic_data *nic_data = efx->nic_data;
1952
1953        /* In atomic context, cannot update HW stats.  Just update the
1954         * software stats and return so the caller can continue.
1955         */
1956        efx_update_sw_stats(efx, nic_data->stats);
1957        return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1958}
1959
1960static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
1961{
1962        struct efx_nic *efx = channel->efx;
1963        unsigned int mode, usecs;
1964        efx_dword_t timer_cmd;
1965
1966        if (channel->irq_moderation_us) {
1967                mode = 3;
1968                usecs = channel->irq_moderation_us;
1969        } else {
1970                mode = 0;
1971                usecs = 0;
1972        }
1973
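            /* Three ways to program the timer, depending on firmware: with
             * workaround 61265 it must be set via MC_CMD_SET_EVQ_TMR; with
             * workaround 35388 via the indirect ER_DD_EVQ_INDIRECT page
             * registers; otherwise by writing ER_DZ_EVQ_TMR directly.
             */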
1974        if (EFX_EF10_WORKAROUND_61265(efx)) {
1975                MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
1976                unsigned int ns = usecs * 1000;
1977
1978                MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE,
1979                               channel->channel);
1980                MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
1981                MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
1982                MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode);
1983
1984                efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR,
1985                                   inbuf, sizeof(inbuf), 0, NULL, 0);
1986        } else if (EFX_EF10_WORKAROUND_35388(efx)) {
1987                unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
1988
1989                EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
1990                                     EFE_DD_EVQ_IND_TIMER_FLAGS,
1991                                     ERF_DD_EVQ_IND_TIMER_MODE, mode,
1992                                     ERF_DD_EVQ_IND_TIMER_VAL, ticks);
1993                efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
1994                                channel->channel);
1995        } else {
1996                unsigned int ticks = efx_usecs_to_ticks(efx, usecs);
1997
1998                EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
1999                                     ERF_DZ_TC_TIMER_VAL, ticks,
2000                                     ERF_FZ_TC_TMR_REL_VAL, ticks);
2001                efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
2002                                channel->channel);
2003        }
2004}
2005
2006static void efx_ef10_get_wol_vf(struct efx_nic *efx,
2007                                struct ethtool_wolinfo *wol) {}
2008
2009static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
2010{
2011        return -EOPNOTSUPP;
2012}
2013
2014static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
2015{
2016        wol->supported = 0;
2017        wol->wolopts = 0;
2018        memset(&wol->sopass, 0, sizeof(wol->sopass));
2019}
2020
2021static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
2022{
2023        if (type != 0)
2024                return -EINVAL;
2025        return 0;
2026}
2027
2028static void efx_ef10_mcdi_request(struct efx_nic *efx,
2029                                  const efx_dword_t *hdr, size_t hdr_len,
2030                                  const efx_dword_t *sdu, size_t sdu_len)
2031{
2032        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2033        u8 *pdu = nic_data->mcdi_buf.addr;
2034
2035        memcpy(pdu, hdr, hdr_len);
2036        memcpy(pdu + hdr_len, sdu, sdu_len);
2037        wmb();
2038
2039        /* The hardware provides 'low' and 'high' (doorbell) registers
2040         * for passing the 64-bit address of an MCDI request to
2041         * firmware.  However the dwords are swapped by firmware.  The
2042         * least significant bits of the doorbell are then 0 for all
2043         * MCDI requests due to alignment.
2044         */
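            /* For example, a request buffer at DMA address 0x123456000 is
             * written as 0x00000001 to ER_DZ_MC_DB_LWRD and 0x23456000 to
             * ER_DZ_MC_DB_HWRD; firmware swaps the two dwords back to
             * recover the address.
             */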
2045        _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
2046                    ER_DZ_MC_DB_LWRD);
2047        _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
2048                    ER_DZ_MC_DB_HWRD);
2049}
2050
2051static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
2052{
2053        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2054        const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
2055
2056        rmb();
2057        return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
2058}
2059
2060static void
2061efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
2062                            size_t offset, size_t outlen)
2063{
2064        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2065        const u8 *pdu = nic_data->mcdi_buf.addr;
2066
2067        memcpy(outbuf, pdu + offset, outlen);
2068}
2069
2070static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
2071{
2072        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2073
2074        /* All our allocations have been reset */
2075        efx_ef10_table_reset_mc_allocations(efx);
2076
2077        /* The datapath firmware might have been changed */
2078        nic_data->must_check_datapath_caps = true;
2079
2080        /* MAC statistics have been cleared on the NIC; clear the local
2081         * statistic that we update with efx_update_diff_stat().
2082         */
2083        nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
2084}
2085
2086static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
2087{
2088        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2089        int rc;
2090
2091        rc = efx_ef10_get_warm_boot_count(efx);
2092        if (rc < 0) {
2093                /* The firmware is presumably in the process of
2094                 * rebooting.  However, we are supposed to report each
2095                 * reboot just once, so we must only do that once we
2096                 * can read and store the updated warm boot count.
2097                 */
2098                return 0;
2099        }
2100
2101        if (rc == nic_data->warm_boot_count)
2102                return 0;
2103
2104        nic_data->warm_boot_count = rc;
2105        efx_ef10_mcdi_reboot_detected(efx);
2106
2107        return -EIO;
2108}
2109
2110/* Handle an MSI interrupt
2111 *
2112 * Handle an MSI hardware interrupt.  This routine schedules event
2113 * queue processing.  No interrupt acknowledgement cycle is necessary.
2114 * Also, we never need to check that the interrupt is for us, since
2115 * MSI interrupts cannot be shared.
2116 */
2117static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
2118{
2119        struct efx_msi_context *context = dev_id;
2120        struct efx_nic *efx = context->efx;
2121
2122        netif_vdbg(efx, intr, efx->net_dev,
2123                   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
2124
2125        if (likely(READ_ONCE(efx->irq_soft_enabled))) {
2126                /* Note test interrupts */
2127                if (context->index == efx->irq_level)
2128                        efx->last_irq_cpu = raw_smp_processor_id();
2129
2130                /* Schedule processing of the channel */
2131                efx_schedule_channel_irq(efx->channel[context->index]);
2132        }
2133
2134        return IRQ_HANDLED;
2135}
2136
2137static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
2138{
2139        struct efx_nic *efx = dev_id;
2140        bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
2141        struct efx_channel *channel;
2142        efx_dword_t reg;
2143        u32 queues;
2144
2145        /* Read the ISR which also ACKs the interrupts */
2146        efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
2147        queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
2148
2149        if (queues == 0)
2150                return IRQ_NONE;
2151
2152        if (likely(soft_enabled)) {
2153                /* Note test interrupts */
2154                if (queues & (1U << efx->irq_level))
2155                        efx->last_irq_cpu = raw_smp_processor_id();
2156
2157                efx_for_each_channel(channel, efx) {
2158                        if (queues & 1)
2159                                efx_schedule_channel_irq(channel);
2160                        queues >>= 1;
2161                }
2162        }
2163
2164        netif_vdbg(efx, intr, efx->net_dev,
2165                   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
2166                   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
2167
2168        return IRQ_HANDLED;
2169}
2170
2171static int efx_ef10_irq_test_generate(struct efx_nic *efx)
2172{
2173        MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
2174
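            /* If firmware accepts the bug 41750 workaround, take that to
             * mean MC_CMD_TRIGGER_INTERRUPT is unusable here and report
             * the interrupt test as unsupported.
             */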
2175        if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true,
2176                                    NULL) == 0)
2177                return -ENOTSUPP;
2178
2179        BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
2180
2181        MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
2182        return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
2183                            inbuf, sizeof(inbuf), NULL, 0, NULL);
2184}
2185
2186static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
2187{
2188        /* low two bits of label are what we want for type */
2189        BUILD_BUG_ON((EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM) != 3);
2190        tx_queue->type = tx_queue->label & 3;
2191        return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
2192                                    (tx_queue->ptr_mask + 1) *
2193                                    sizeof(efx_qword_t),
2194                                    GFP_KERNEL);
2195}
2196
2197/* This writes to the TX_DESC_WPTR and also pushes data */
2198static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
2199                                         const efx_qword_t *txd)
2200{
2201        unsigned int write_ptr;
2202        efx_oword_t reg;
2203
2204        write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2205        EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
2206        reg.qword[0] = *txd;
2207        efx_writeo_page(tx_queue->efx, &reg,
2208                        ER_DZ_TX_DESC_UPD, tx_queue->queue);
2209}
2210
2211/* Add Firmware-Assisted TSO v2 option descriptors to a queue.
2212 */
2213int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
2214                         bool *data_mapped)
2215{
2216        struct efx_tx_buffer *buffer;
2217        u16 inner_ipv4_id = 0;
2218        u16 outer_ipv4_id = 0;
2219        struct tcphdr *tcp;
2220        struct iphdr *ip;
2221        u16 ip_tot_len;
2222        u32 seqnum;
2223        u32 mss;
2224
2225        EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2);
2226
2227        mss = skb_shinfo(skb)->gso_size;
2228
2229        if (unlikely(mss < 4)) {
2230                WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss);
2231                return -EINVAL;
2232        }
2233
2234        if (skb->encapsulation) {
2235                if (!tx_queue->tso_encap)
2236                        return -EINVAL;
2237                ip = ip_hdr(skb);
2238                if (ip->version == 4)
2239                        outer_ipv4_id = ntohs(ip->id);
2240
2241                ip = inner_ip_hdr(skb);
2242                tcp = inner_tcp_hdr(skb);
2243        } else {
2244                ip = ip_hdr(skb);
2245                tcp = tcp_hdr(skb);
2246        }
2247
2248        /* 8000-series EF10 hardware requires that IP Total Length be
2249         * greater than or equal to the value it will have in each segment
2250         * (which is at most mss + 208 + TCP header length), but also less
2251         * than (0x10000 - inner_network_header).  Otherwise the TCP
2252         * checksum calculation will be broken for encapsulated packets.
2253         * We fill in ip->tot_len with 0xff30, which should satisfy the
2254         * first requirement unless the MSS is ridiculously large (which
2255         * should be impossible as the driver max MTU is 9216); it is
2256         * guaranteed to satisfy the second as we only attempt TSO if
2257         * inner_network_header <= 208.
2258         */
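            /* The negation below wraps in u16 arithmetic to
             * 0x10000 - EFX_TSO2_MAX_HDRLEN, i.e. the 0xff30 described above
             * (assuming EFX_TSO2_MAX_HDRLEN is the 208-byte header limit).
             */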
2259        ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
2260        EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
2261                                  (tcp->doff << 2u) > ip_tot_len);
2262
2263        if (ip->version == 4) {
2264                ip->tot_len = htons(ip_tot_len);
2265                ip->check = 0;
2266                inner_ipv4_id = ntohs(ip->id);
2267        } else {
2268                ((struct ipv6hdr *)ip)->payload_len = htons(ip_tot_len);
2269        }
2270
2271        seqnum = ntohl(tcp->seq);
2272
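            /* TSOv2 takes two option descriptors: FATSO2A carries the
             * inner IPv4 ID and the TCP sequence number, FATSO2B the
             * outer IPv4 ID and the MSS.
             */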
2273        buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2274
2275        buffer->flags = EFX_TX_BUF_OPTION;
2276        buffer->len = 0;
2277        buffer->unmap_len = 0;
2278        EFX_POPULATE_QWORD_5(buffer->option,
2279                        ESF_DZ_TX_DESC_IS_OPT, 1,
2280                        ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2281                        ESF_DZ_TX_TSO_OPTION_TYPE,
2282                        ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
2283                        ESF_DZ_TX_TSO_IP_ID, inner_ipv4_id,
2284                        ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
2285                        );
2286        ++tx_queue->insert_count;
2287
2288        buffer = efx_tx_queue_get_insert_buffer(tx_queue);
2289
2290        buffer->flags = EFX_TX_BUF_OPTION;
2291        buffer->len = 0;
2292        buffer->unmap_len = 0;
2293        EFX_POPULATE_QWORD_5(buffer->option,
2294                        ESF_DZ_TX_DESC_IS_OPT, 1,
2295                        ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
2296                        ESF_DZ_TX_TSO_OPTION_TYPE,
2297                        ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
2298                        ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id,
2299                        ESF_DZ_TX_TSO_TCP_MSS, mss
2300                        );
2301        ++tx_queue->insert_count;
2302
2303        return 0;
2304}
2305
2306static u32 efx_ef10_tso_versions(struct efx_nic *efx)
2307{
2308        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2309        u32 tso_versions = 0;
2310
2311        if (nic_data->datapath_caps &
2312            (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))
2313                tso_versions |= BIT(1);
2314        if (nic_data->datapath_caps2 &
2315            (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))
2316                tso_versions |= BIT(2);
2317        return tso_versions;
2318}
2319
2320static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
2321{
2322        bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
2323        bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
2324        struct efx_channel *channel = tx_queue->channel;
2325        struct efx_nic *efx = tx_queue->efx;
2326        struct efx_ef10_nic_data *nic_data;
2327        efx_qword_t *txd;
2328        int rc;
2329
2330        nic_data = efx->nic_data;
2331
2332        /* Only attempt to enable TX timestamping if we have the license for it,
2333         * otherwise TXQ init will fail
2334         */
2335        if (!(nic_data->licensed_features &
2336              (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) {
2337                tx_queue->timestamping = false;
2338                /* Disable sync events on this channel. */
2339                if (efx->type->ptp_set_ts_sync_events)
2340                        efx->type->ptp_set_ts_sync_events(efx, false, false);
2341        }
2342
2343        /* TSOv2 is a limited resource that can only be configured on a limited
2344         * number of queues. TSO without checksum offload is not really a thing,
2345         * so we only enable it for those queues.
2346         * TSOv2 cannot be used with Hardware timestamping, and is never needed
2347         * for XDP tx.
2348         */
2349        if (efx_has_cap(efx, TX_TSO_V2)) {
2350                if ((csum_offload || inner_csum) &&
2351                    !tx_queue->timestamping && !tx_queue->xdp_tx) {
2352                        tx_queue->tso_version = 2;
2353                        netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
2354                                  channel->channel);
2355                }
2356        } else if (efx_has_cap(efx, TX_TSO)) {
2357                tx_queue->tso_version = 1;
2358        }
2359
2360        rc = efx_mcdi_tx_init(tx_queue);
2361        if (rc)
2362                goto fail;
2363
2364        /* A previous user of this TX queue might have set us up the
2365         * bomb by writing a descriptor to the TX push collector but
2366         * not the doorbell.  (Each collector belongs to a port, not a
2367         * queue or function, so cannot easily be reset.)  We must
2368         * attempt to push a no-op descriptor in its place.
2369         */
2370        tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
2371        tx_queue->insert_count = 1;
2372        txd = efx_tx_desc(tx_queue, 0);
2373        EFX_POPULATE_QWORD_7(*txd,
2374                             ESF_DZ_TX_DESC_IS_OPT, true,
2375                             ESF_DZ_TX_OPTION_TYPE,
2376                             ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
2377                             ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
2378                             ESF_DZ_TX_OPTION_IP_CSUM, csum_offload && tx_queue->tso_version != 2,
2379                             ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, inner_csum,
2380                             ESF_DZ_TX_OPTION_INNER_IP_CSUM, inner_csum && tx_queue->tso_version != 2,
2381                             ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
2382        tx_queue->write_count = 1;
2383
2384        if (tx_queue->tso_version == 2 && efx_has_cap(efx, TX_TSO_V2_ENCAP))
2385                tx_queue->tso_encap = true;
2386
2387        wmb();
2388        efx_ef10_push_tx_desc(tx_queue, txd);
2389
2390        return;
2391
2392fail:
2393        netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
2394                    tx_queue->queue);
2395}
2396
2397/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
2398static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
2399{
2400        unsigned int write_ptr;
2401        efx_dword_t reg;
2402
2403        write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2404        EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
2405        efx_writed_page(tx_queue->efx, &reg,
2406                        ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
2407}
2408
2409#define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff
2410
2411static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue,
2412                                          dma_addr_t dma_addr, unsigned int len)
2413{
2414        if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) {
2415                /* If we need to break across multiple descriptors we should
2416                 * stop at a page boundary. This assumes the length limit is
2417                 * greater than the page size.
2418                 */
2419                dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN;
2420
2421                BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE);
2422                len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr;
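                    /* Worked example (assuming a 4KiB EFX_PAGE_SIZE):
                     * dma_addr 0x5010 with len 0x5000 gives end 0x900f,
                     * so len becomes 0x9000 - 0x5010 = 0x3ff0 and the
                     * descriptor stops at the page boundary.
                     */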
2423        }
2424
2425        return len;
2426}
2427
2428static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
2429{
2430        unsigned int old_write_count = tx_queue->write_count;
2431        struct efx_tx_buffer *buffer;
2432        unsigned int write_ptr;
2433        efx_qword_t *txd;
2434
2435        tx_queue->xmit_pending = false;
2436        if (unlikely(tx_queue->write_count == tx_queue->insert_count))
2437                return;
2438
2439        do {
2440                write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
2441                buffer = &tx_queue->buffer[write_ptr];
2442                txd = efx_tx_desc(tx_queue, write_ptr);
2443                ++tx_queue->write_count;
2444
2445                /* Create TX descriptor ring entry */
2446                if (buffer->flags & EFX_TX_BUF_OPTION) {
2447                        *txd = buffer->option;
2448                        if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
2449                                /* PIO descriptor */
2450                                tx_queue->packet_write_count = tx_queue->write_count;
2451                } else {
2452                        tx_queue->packet_write_count = tx_queue->write_count;
2453                        BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
2454                        EFX_POPULATE_QWORD_3(
2455                                *txd,
2456                                ESF_DZ_TX_KER_CONT,
2457                                buffer->flags & EFX_TX_BUF_CONT,
2458                                ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
2459                                ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
2460                }
2461        } while (tx_queue->write_count != tx_queue->insert_count);
2462
2463        wmb(); /* Ensure descriptors are written before they are fetched */
2464
2465        if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
2466                txd = efx_tx_desc(tx_queue,
2467                                  old_write_count & tx_queue->ptr_mask);
2468                efx_ef10_push_tx_desc(tx_queue, txd);
2469                ++tx_queue->pushes;
2470        } else {
2471                efx_ef10_notify_tx_desc(tx_queue);
2472        }
2473}
2474
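    /* "Multicast chaining" is controlled through firmware workaround 26807.
     * Probe whether this firmware implements it and enable it if we can;
     * note that enabling it may FLR the other functions on the NIC, which
     * is reported and handled below.
     */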
2475static int efx_ef10_probe_multicast_chaining(struct efx_nic *efx)
2476{
2477        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2478        unsigned int enabled, implemented;
2479        bool want_workaround_26807;
2480        int rc;
2481
2482        rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
2483        if (rc == -ENOSYS) {
2484                /* GET_WORKAROUNDS itself predates workaround 26807; if even
2485                 * that command is missing, the workaround cannot be available.
2486                 */
2487                nic_data->workaround_26807 = false;
2488                return 0;
2489        }
2490        if (rc)
2491                return rc;
2492        want_workaround_26807 =
2493                implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807;
2494        nic_data->workaround_26807 =
2495                !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
2496
2497        if (want_workaround_26807 && !nic_data->workaround_26807) {
2498                unsigned int flags;
2499
2500                rc = efx_mcdi_set_workaround(efx,
2501                                             MC_CMD_WORKAROUND_BUG26807,
2502                                             true, &flags);
2503                if (!rc) {
2504                        if (flags &
2505                            1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
2506                                netif_info(efx, drv, efx->net_dev,
2507                                           "other functions on NIC have been reset\n");
2508
2509                                /* With MCFW v4.6.x and earlier, the
2510                                 * boot count will have incremented,
2511                                 * so re-read the warm_boot_count
2512                                 * value now to ensure this function
2513                                 * doesn't think it has changed next
2514                                 * time it checks.
2515                                 */
2516                                rc = efx_ef10_get_warm_boot_count(efx);
2517                                if (rc >= 0) {
2518                                        nic_data->warm_boot_count = rc;
2519                                        rc = 0;
2520                                }
2521                        }
2522                        nic_data->workaround_26807 = true;
2523                } else if (rc == -EPERM) {
2524                        rc = 0;
2525                }
2526        }
2527        return rc;
2528}
2529
2530static int efx_ef10_filter_table_probe(struct efx_nic *efx)
2531{
2532        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2533        int rc = efx_ef10_probe_multicast_chaining(efx);
2534        struct efx_mcdi_filter_vlan *vlan;
2535
2536        if (rc)
2537                return rc;
2538        rc = efx_mcdi_filter_table_probe(efx, nic_data->workaround_26807);
2539
2540        if (rc)
2541                return rc;
2542
2543        list_for_each_entry(vlan, &nic_data->vlan_list, list) {
2544                rc = efx_mcdi_filter_add_vlan(efx, vlan->vid);
2545                if (rc)
2546                        goto fail_add_vlan;
2547        }
2548        return 0;
2549
2550fail_add_vlan:
2551        efx_mcdi_filter_table_remove(efx);
2552        return rc;
2553}
2554
2555/* This creates an entry in the RX descriptor queue */
2556static inline void
2557efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
2558{
2559        struct efx_rx_buffer *rx_buf;
2560        efx_qword_t *rxd;
2561
2562        rxd = efx_rx_desc(rx_queue, index);
2563        rx_buf = efx_rx_buffer(rx_queue, index);
2564        EFX_POPULATE_QWORD_2(*rxd,
2565                             ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
2566                             ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
2567}
2568
2569static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
2570{
2571        struct efx_nic *efx = rx_queue->efx;
2572        unsigned int write_count;
2573        efx_dword_t reg;
2574
2575        /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
2576        write_count = rx_queue->added_count & ~7;
2577        if (rx_queue->notified_count == write_count)
2578                return;
2579
2580        do
2581                efx_ef10_build_rx_desc(
2582                        rx_queue,
2583                        rx_queue->notified_count & rx_queue->ptr_mask);
2584        while (++rx_queue->notified_count != write_count);
2585
2586        wmb();
2587        EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
2588                             write_count & rx_queue->ptr_mask);
2589        efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
2590                        efx_rx_queue_index(rx_queue));
2591}
2592
2593static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
2594
2595static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
2596{
2597        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2598        MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2599        efx_qword_t event;
2600
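            /* Ask the MC to post a driver-generated event on this channel's
             * event queue; when the EFX_EF10_REFILL event is processed the
             * refill can be retried from the event-handling path.
             */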
2601        EFX_POPULATE_QWORD_2(event,
2602                             ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2603                             ESF_DZ_EV_DATA, EFX_EF10_REFILL);
2604
2605        MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2606
2607        /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2608         * already swapped the data to little-endian order.
2609         */
2610        memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2611               sizeof(efx_qword_t));
2612
2613        efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
2614                           inbuf, sizeof(inbuf), 0,
2615                           efx_ef10_rx_defer_refill_complete, 0);
2616}
2617
2618static void
2619efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
2620                                  int rc, efx_dword_t *outbuf,
2621                                  size_t outlen_actual)
2622{
2623        /* nothing to do */
2624}
2625
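    /* Use the v2 INIT_EVQ MCDI when the firmware advertises it, and
     * request event cut-through only when the firmware does not batch
     * (merge) RX events, since merged RX events and cut-through cannot
     * be used together.
     */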
2626static int efx_ef10_ev_init(struct efx_channel *channel)
2627{
2628        struct efx_nic *efx = channel->efx;
2629        struct efx_ef10_nic_data *nic_data;
2630        bool use_v2, cut_thru;
2631
2632        nic_data = efx->nic_data;
2633        use_v2 = nic_data->datapath_caps2 &
2634                            1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN;
2635        cut_thru = !(nic_data->datapath_caps &
2636                              1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
2637        return efx_mcdi_ev_init(channel, cut_thru, use_v2);
2638}
2639
2640static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
2641                                           unsigned int rx_queue_label)
2642{
2643        struct efx_nic *efx = rx_queue->efx;
2644
2645        netif_info(efx, hw, efx->net_dev,
2646                   "rx event arrived on queue %d labeled as queue %u\n",
2647                   efx_rx_queue_index(rx_queue), rx_queue_label);
2648
2649        efx_schedule_reset(efx, RESET_TYPE_DISABLE);
2650}
2651
2652static void
2653efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
2654                             unsigned int actual, unsigned int expected)
2655{
2656        unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
2657        struct efx_nic *efx = rx_queue->efx;
2658
2659        netif_info(efx, hw, efx->net_dev,
2660                   "dropped %d events (index=%d expected=%d)\n",
2661                   dropped, actual, expected);
2662
2663        efx_schedule_reset(efx, RESET_TYPE_DISABLE);
2664}
2665
2666/* A partially received RX packet was aborted; clean up. */
2667static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
2668{
2669        unsigned int rx_desc_ptr;
2670
2671        netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
2672                  "scattered RX aborted (dropping %u buffers)\n",
2673                  rx_queue->scatter_n);
2674
2675        rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
2676
2677        efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
2678                      0, EFX_RX_PKT_DISCARD);
2679
2680        rx_queue->removed_count += rx_queue->scatter_n;
2681        rx_queue->scatter_n = 0;
2682        rx_queue->scatter_len = 0;
2683        ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
2684}
2685
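    /* Examine the error bits of an RX event.  Returns EFX_RX_PKT_DISCARD
     * for CRC errors (unless NETIF_F_RXALL is set); for checksum errors
     * it bumps the matching per-channel counter and returns 0, so the
     * packet is delivered without checksum-offload flags set.
     */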
2686static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
2687                                           unsigned int n_packets,
2688                                           unsigned int rx_encap_hdr,
2689                                           unsigned int rx_l3_class,
2690                                           unsigned int rx_l4_class,
2691                                           const efx_qword_t *event)
2692{
2693        struct efx_nic *efx = channel->efx;
2694        bool handled = false;
2695
2696        if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
2697                if (!(efx->net_dev->features & NETIF_F_RXALL)) {
2698                        if (!efx->loopback_selftest)
2699                                channel->n_rx_eth_crc_err += n_packets;
2700                        return EFX_RX_PKT_DISCARD;
2701                }
2702                handled = true;
2703        }
2704        if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
2705                if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
2706                             rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
2707                             rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
2708                             rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
2709                             rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
2710                        netdev_WARN(efx->net_dev,
2711                                    "invalid class for RX_IPCKSUM_ERR: event="
2712                                    EFX_QWORD_FMT "\n",
2713                                    EFX_QWORD_VAL(*event));
2714                if (!efx->loopback_selftest)
2715                        *(rx_encap_hdr ?
2716                          &channel->n_rx_outer_ip_hdr_chksum_err :
2717                          &channel->n_rx_ip_hdr_chksum_err) += n_packets;
2718                return 0;
2719        }
2720        if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
2721                if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
2722                             ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
2723                               rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
2724                              (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
2725                               rx_l4_class != ESE_FZ_L4_CLASS_UDP))))
2726                        netdev_WARN(efx->net_dev,
2727                                    "invalid class for RX_TCPUDP_CKSUM_ERR: event="
2728                                    EFX_QWORD_FMT "\n",
2729                                    EFX_QWORD_VAL(*event));
2730                if (!efx->loopback_selftest)
2731                        *(rx_encap_hdr ?
2732                          &channel->n_rx_outer_tcp_udp_chksum_err :
2733                          &channel->n_rx_tcp_udp_chksum_err) += n_packets;
2734                return 0;
2735        }
2736        if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
2737                if (unlikely(!rx_encap_hdr))
2738                        netdev_WARN(efx->net_dev,
2739                                    "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
2740                                    EFX_QWORD_FMT "\n",
2741                                    EFX_QWORD_VAL(*event));
2742                else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
2743                                  rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
2744                                  rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
2745                                  rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
2746                        netdev_WARN(efx->net_dev,
2747                                    "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
2748                                    EFX_QWORD_FMT "\n",
2749                                    EFX_QWORD_VAL(*event));
2750                if (!efx->loopback_selftest)
2751                        channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
2752                return 0;
2753        }
2754        if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
2755                if (unlikely(!rx_encap_hdr))
2756                        netdev_WARN(efx->net_dev,
2757                                    "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
2758                                    EFX_QWORD_FMT "\n",
2759                                    EFX_QWORD_VAL(*event));
2760                else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
2761                                   rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
2762                                  (rx_l4_class != ESE_FZ_L4_CLASS_TCP &&
2763                                   rx_l4_class != ESE_FZ_L4_CLASS_UDP)))
2764                        netdev_WARN(efx->net_dev,
2765                                    "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
2766                                    EFX_QWORD_FMT "\n",
2767                                    EFX_QWORD_VAL(*event));
2768                if (!efx->loopback_selftest)
2769                        channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
2770                return 0;
2771        }
2772
2773        WARN_ON(!handled); /* No error bits were recognised */
2774        return 0;
2775}
2776
2777static int efx_ef10_handle_rx_event(struct efx_channel *channel,
2778                                    const efx_qword_t *event)
2779{
2780        unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
2781        unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
2782        unsigned int n_descs, n_packets, i;
2783        struct efx_nic *efx = channel->efx;
2784        struct efx_ef10_nic_data *nic_data = efx->nic_data;
2785        struct efx_rx_queue *rx_queue;
2786        efx_qword_t errors;
2787        bool rx_cont;
2788        u16 flags = 0;
2789
2790        if (unlikely(READ_ONCE(efx->reset_pending)))
2791                return 0;
2792
2793        /* Basic packet information */
2794        rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
2795        next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
2796        rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
2797        rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
2798        rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS);
2799        rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
2800        rx_encap_hdr =
2801                nic_data->datapath_caps &
2802                        (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
2803                EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
2804                ESE_EZ_ENCAP_HDR_NONE;
2805
2806        if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
2807                netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
2808                            EFX_QWORD_FMT "\n",
2809                            EFX_QWORD_VAL(*event));
2810
2811        rx_queue = efx_channel_get_rx_queue(channel);
2812
2813        if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
2814                efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
2815
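            /* next_ptr_lbits holds only the low ESF_DZ_RX_DSC_PTR_LBITS_WIDTH
             * bits of the descriptor pointer, so the distance from our
             * removed_count must be taken modulo 2^WIDTH.
             */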
2816        n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
2817                   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
2818
2819        if (n_descs != rx_queue->scatter_n + 1) {
2822                /* detect rx abort */
2823                if (unlikely(n_descs == rx_queue->scatter_n)) {
2824                        if (rx_queue->scatter_n == 0 || rx_bytes != 0)
2825                                netdev_WARN(efx->net_dev,
2826                                            "invalid RX abort: scatter_n=%u event="
2827                                            EFX_QWORD_FMT "\n",
2828                                            rx_queue->scatter_n,
2829                                            EFX_QWORD_VAL(*event));
2830                        efx_ef10_handle_rx_abort(rx_queue);
2831                        return 0;
2832                }
2833
2834                /* Check that RX completion merging is valid, i.e.
2835                 * the current firmware supports it and this is a
2836                 * non-scattered packet.
2837                 */
2838                if (!(nic_data->datapath_caps &
2839                      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
2840                    rx_queue->scatter_n != 0 || rx_cont) {
2841                        efx_ef10_handle_rx_bad_lbits(
2842                                rx_queue, next_ptr_lbits,
2843                                (rx_queue->removed_count +
2844                                 rx_queue->scatter_n + 1) &
2845                                ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
2846                        return 0;
2847                }
2848
2849                /* Merged completion for multiple non-scattered packets */
2850                rx_queue->scatter_n = 1;
2851                rx_queue->scatter_len = 0;
2852                n_packets = n_descs;
2853                ++channel->n_rx_merge_events;
2854                channel->n_rx_merge_packets += n_packets;
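                    /* The event's byte count cannot describe several packets,
                     * so EFX_RX_PKT_PREFIX_LEN makes the RX path read each
                     * packet's length from its RX prefix instead.
                     */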
2855                flags |= EFX_RX_PKT_PREFIX_LEN;
2856        } else {
2857                ++rx_queue->scatter_n;
2858                rx_queue->scatter_len += rx_bytes;
2859                if (rx_cont)
2860                        return 0;
2861                n_packets = 1;
2862        }
2863
2864        EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
2865                                     ESF_DZ_RX_IPCKSUM_ERR, 1,
2866                                     ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
2867                                     ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
2868                                     ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
2869        EFX_AND_QWORD(errors, *event, errors);
2870        if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
2871                flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
2872                                                         rx_encap_hdr,
2873                                                         rx_l3_class, rx_l4_class,
2874                                                         event);
2875        } else {
2876                bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP ||
2877                              rx_l4_class == ESE_FZ_L4_CLASS_UDP;
2878
2879                switch (rx_encap_hdr) {
2880                case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
2881                        flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
2882                        if (tcpudp)
2883                                flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
2884                        break;
2885                case ESE_EZ_ENCAP_HDR_GRE:
2886                case ESE_EZ_ENCAP_HDR_NONE:
2887                        if (tcpudp)
2888                                flags |= EFX_RX_PKT_CSUMMED;
2889                        break;
2890                default:
2891                        netdev_WARN(efx->net_dev,
2892                                    "unknown encapsulation type: event="
2893                                    EFX_QWORD_FMT "\n",
2894                                    EFX_QWORD_VAL(*event));
2895                }
2896        }
2897
2898        if (rx_l4_class == ESE_FZ_L4_CLASS_TCP)
2899                flags |= EFX_RX_PKT_TCP;
2900
2901        channel->irq_mod_score += 2 * n_packets;
2902
2903        /* Handle received packet(s) */
2904        for (i = 0; i < n_packets; i++) {
2905                efx_rx_packet(rx_queue,
2906                              rx_queue->removed_count & rx_queue->ptr_mask,
2907                              rx_queue->scatter_n, rx_queue->scatter_len,
2908                              flags);
2909                rx_queue->removed_count += rx_queue->scatter_n;
2910        }
2911
2912        rx_queue->scatter_n = 0;
2913        rx_queue->scatter_len = 0;
2914
2915        return n_packets;
2916}
2917
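    /* Reassemble one 32-bit half of a TX timestamp from the two 16-bit
     * fields it is split across within the event (see the layout notes
     * in efx_ef10_handle_tx_event() below).
     */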
2918static u32 efx_ef10_extract_event_ts(efx_qword_t *event)
2919{
2920        u32 tstamp;
2921
2922        tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI);
2923        tstamp <<= 16;
2924        tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO);
2925
2926        return tstamp;
2927}
2928
2929static void
2930efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
2931{
2932        struct efx_nic *efx = channel->efx;
2933        struct efx_tx_queue *tx_queue;
2934        unsigned int tx_ev_desc_ptr;
2935        unsigned int tx_ev_q_label;
2936        unsigned int tx_ev_type;
2937        u64 ts_part;
2938
2939        if (unlikely(READ_ONCE(efx->reset_pending)))
2940                return;
2941
2942        if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
2943                return;
2944
2945        /* Get the transmit queue */
2946        tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
2947        tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
2948
2949        if (!tx_queue->timestamping) {
2950                /* Transmit completion */
2951                tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
2952                efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
2953                return;
2954        }
2955
2956        /* Transmit timestamps are only available for 8XXX series. They result
2957         * in up to three events per packet. These occur in order, and are:
2958         *  - the normal completion event (may be omitted)
2959         *  - the low part of the timestamp
2960         *  - the high part of the timestamp
2961         *
2962         * It's possible for multiple completion events to appear before the
2963         * corresponding timestamps. For example, we can get:
2964         *  COMP N
2965         *  COMP N+1
2966         *  TS_LO N
2967         *  TS_HI N
2968         *  TS_LO N+1
2969         *  TS_HI N+1
2970         *
2971         * It's also possible for adjacent completions to be
2972         * merged, so we may not see COMP N above. As such, the completion
2973         * events are not very useful here.
2974         *
2975         * Each part of the timestamp is itself split across two 16 bit
2976         * fields in the event.
2977         */
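            /* Note: TS_LO carries the minor half and TS_HI the major half
             * of the timestamp (broadly, sub-seconds and seconds).
             * Completion is only reported once TS_HI arrives, and the PTP
             * code later converts the major/minor pair to host time.
             */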
2978        tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1);
2979
2980        switch (tx_ev_type) {
2981        case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
2982                /* Ignore this event - see above. */
2983                break;
2984
2985        case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
2986                ts_part = efx_ef10_extract_event_ts(event);
2987                tx_queue->completed_timestamp_minor = ts_part;
2988                break;
2989
2990        case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
2991                ts_part = efx_ef10_extract_event_ts(event);
2992                tx_queue->completed_timestamp_major = ts_part;
2993
2994                efx_xmit_done_single(tx_queue);
2995                break;
2996
2997        default:
2998                netif_err(efx, hw, efx->net_dev,
2999                          "channel %d unknown tx event type %d (data "
3000                          EFX_QWORD_FMT ")\n",
3001                          channel->channel, tx_ev_type,
3002                          EFX_QWORD_VAL(*event));
3003                break;
3004        }
3005}
3006
3007static void
3008efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
3009{
3010        struct efx_nic *efx = channel->efx;
3011        int subcode;
3012
3013        subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
3014
3015        switch (subcode) {
3016        case ESE_DZ_DRV_TIMER_EV:
3017        case ESE_DZ_DRV_WAKE_UP_EV:
3018                break;
3019        case ESE_DZ_DRV_START_UP_EV:
3020                /* Event queue initialisation complete; nothing to do. */
3021                break;
3022        default:
3023                netif_err(efx, hw, efx->net_dev,
3024                          "channel %d unknown driver event type %d"
3025                          " (data " EFX_QWORD_FMT ")\n",
3026                          channel->channel, subcode,
3027                          EFX_QWORD_VAL(*event));
3028
3029        }
3030}
3031
3032static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
3033                                                   efx_qword_t *event)
3034{
3035        struct efx_nic *efx = channel->efx;
3036        u32 subcode;
3037
3038        subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
3039
3040        switch (subcode) {
3041        case EFX_EF10_TEST:
3042                channel->event_test_cpu = raw_smp_processor_id();
3043                break;
3044        case EFX_EF10_REFILL:
3045                /* The queue must be empty, so no RX events will
3046                 * arrive and efx_process_channel() will never refill
3047                 * it; do the refill here instead.
3048                 */
3049                efx_fast_push_rx_descriptors(&channel->rx_queue, true);
3050                break;
3051        default:
3052                netif_err(efx, hw, efx->net_dev,
3053                          "channel %d unknown driver event type %u"
3054                          " (data " EFX_QWORD_FMT ")\n",
3055                          channel->channel, (unsigned) subcode,
3056                          EFX_QWORD_VAL(*event));
3057        }
3058}
3059
3060static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
3061{
3062        struct efx_nic *efx = channel->efx;
3063        efx_qword_t event, *p_event;
3064        unsigned int read_ptr;
3065        int ev_code;
3066        int spent = 0;
3067
3068        if (quota <= 0)
3069                return spent;
3070
3071        read_ptr = channel->eventq_read_ptr;
3072
3073        for (;;) {
3074                p_event = efx_event(channel, read_ptr);
3075                event = *p_event;
3076
3077                if (!efx_event_present(&event))
3078                        break;
3079
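                    /* Mark the slot empty: EFX_SET_QWORD writes all-ones,
                     * which efx_event_present() treats as "no event".
                     */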
3080                EFX_SET_QWORD(*p_event);
3081
3082                ++read_ptr;
3083
3084                ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
3085
3086                netif_vdbg(efx, drv, efx->net_dev,
3087                           "processing event on %d " EFX_QWORD_FMT "\n",
3088                           channel->channel, EFX_QWORD_VAL(event));
3089
3090                switch (ev_code) {
3091                case ESE_DZ_EV_CODE_MCDI_EV:
3092                        efx_mcdi_process_event(channel, &event);
3093                        break;
3094                case ESE_DZ_EV_CODE_RX_EV:
3095                        spent += efx_ef10_handle_rx_event(channel, &event);
3096                        if (spent >= quota) {
3097                                /* XXX can we split a merged event to
3098                                 * avoid going over-quota?
3099                                 */
3100                                spent = quota;
3101                                goto out;
3102                        }
3103                        break;
3104                case ESE_DZ_EV_CODE_TX_EV:
3105                        efx_ef10_handle_tx_event(channel, &event);
3106                        break;
3107                case ESE_DZ_EV_CODE_DRIVER_EV:
3108                        efx_ef10_handle_driver_event(channel, &event);
3109                        if (++spent == quota)
3110                                goto out;
3111                        break;
3112                case EFX_EF10_DRVGEN_EV:
3113                        efx_ef10_handle_driver_generated_event(channel, &event);
3114                        break;
3115                default:
3116                        netif_err(efx, hw, efx->net_dev,
3117                                  "channel %d unknown event type %d"
3118                                  " (data " EFX_QWORD_FMT ")\n",
3119                                  channel->channel, ev_code,
3120                                  EFX_QWORD_VAL(event));
3121                }
3122        }
3123
3124out:
3125        channel->eventq_read_ptr = read_ptr;
3126        return spent;
3127}
3128
3129static void efx_ef10_ev_read_ack(struct efx_channel *channel)
3130{
3131        struct efx_nic *efx = channel->efx;
3132        efx_dword_t rptr;
3133
3134        if (EFX_EF10_WORKAROUND_35388(efx)) {
3135                BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
3136                             (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
3137                BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
3138                             (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
3139
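                    /* The indirect register's RPTR field only holds
                     * ERF_DD_EVQ_IND_RPTR_WIDTH bits at a time, so the ack
                     * is split into two writes: high bits first, then low
                     * bits.
                     */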
3140                EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3141                                     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
3142                                     ERF_DD_EVQ_IND_RPTR,
3143                                     (channel->eventq_read_ptr &
3144                                      channel->eventq_mask) >>
3145                                     ERF_DD_EVQ_IND_RPTR_WIDTH);
3146                efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3147                                channel->channel);
3148                EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
3149                                     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
3150                                     ERF_DD_EVQ_IND_RPTR,
3151                                     channel->eventq_read_ptr &
3152                                     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
3153                efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
3154                                channel->channel);
3155        } else {
3156                EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
3157                                     channel->eventq_read_ptr &
3158                                     channel->eventq_mask);
3159                efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
3160        }
3161}
3162
3163static void efx_ef10_ev_test_generate(struct efx_channel *channel)
3164{
3165        MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
3166        struct efx_nic *efx = channel->efx;
3167        efx_qword_t event;
3168        int rc;
3169
3170        EFX_POPULATE_QWORD_2(event,
3171                             ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
3172                             ESF_DZ_EV_DATA, EFX_EF10_TEST);
3173
3174        MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
3175
3176        /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
3177         * already swapped the data to little-endian order.
3178         */
3179        memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
3180               sizeof(efx_qword_t));
3181
3182        rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
3183                          NULL, 0, NULL);
3184        if (rc != 0)
3185                goto fail;
3186
3187        return;
3188
3189fail:
3190        WARN_ON(true);
3191        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
3192}
3193
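    /* The imminent FLR will reset all queues in hardware, so the usual
     * per-queue flush completions will never arrive; zero active_queues
     * so that fini_dmaq does not wait for them.
     */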
3194static void efx_ef10_prepare_flr(struct efx_nic *efx)
3195{
3196        atomic_set(&efx->active_queues, 0);
3197}
3198
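    /* Fallback MAC-change path for firmware without VADAPTOR_SET_MAC:
     * tear down the vadaptor and filter table, swap the MAC on the
     * PF-created vport, then rebuild.  A failure while restoring state
     * forces a datapath reset.
     */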
3199static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
3200{
3201        struct efx_ef10_nic_data *nic_data = efx->nic_data;
3202        u8 mac_old[ETH_ALEN];
3203        int rc, rc2;
3204
3205        /* Only reconfigure a PF-created vport */
3206        if (is_zero_ether_addr(nic_data->vport_mac))
3207                return 0;
3208
3209        efx_device_detach_sync(efx);
3210        efx_net_stop(efx->net_dev);
3211        down_write(&efx->filter_sem);
3212        efx_mcdi_filter_table_remove(efx);
3213        up_write(&efx->filter_sem);
3214
3215        rc = efx_ef10_vadaptor_free(efx, efx->vport_id);
3216        if (rc)
3217                goto restore_filters;
3218
3219        ether_addr_copy(mac_old, nic_data->vport_mac);
3220        rc = efx_ef10_vport_del_mac(efx, efx->vport_id,
3221                                    nic_data->vport_mac);
3222        if (rc)
3223                goto restore_vadaptor;
3224
3225        rc = efx_ef10_vport_add_mac(efx, efx->vport_id,
3226                                    efx->net_dev->dev_addr);
3227        if (!rc) {
3228                ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
3229        } else {
3230                rc2 = efx_ef10_vport_add_mac(efx, efx->vport_id, mac_old);
3231                if (rc2) {
3232                        /* Failed to add original MAC, so clear vport_mac */
3233                        eth_zero_addr(nic_data->vport_mac);
3234                        goto reset_nic;
3235                }
3236        }
3237
3238restore_vadaptor:
3239        rc2 = efx_ef10_vadaptor_alloc(efx, efx->vport_id);
3240        if (rc2)
3241                goto reset_nic;
3242restore_filters:
3243        down_write(&efx->filter_sem);
3244        rc2 = efx_ef10_filter_table_probe(efx);
3245        up_write(&efx->filter_sem);
3246        if (rc2)
3247                goto reset_nic;
3248
3249        rc2 = efx_net_open(efx->net_dev);
3250        if (rc2)
3251                goto reset_nic;
3252
3253        efx_device_attach_if_not_resetting(efx);
3254
3255        return rc;
3256
3257reset_nic:
3258        netif_err(efx, drv, efx->net_dev,
3259                  "Failed to restore when changing MAC address - scheduling reset\n");
3260        efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
3261
3262        return rc ? rc : rc2;
3263}
3264
3265static int efx_ef10_set_mac_address(struct efx_nic *efx)
3266{
3267        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
3268        bool was_enabled = efx->port_enabled;
3269        int rc;
3270
3271        efx_device_detach_sync(efx);
3272        efx_net_stop(efx->net_dev);
3273
3274        mutex_lock(&efx->mac_lock);
3275        down_write(&efx->filter_sem);
3276        efx_mcdi_filter_table_remove(efx);
3277
3278        ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
3279                        efx->net_dev->dev_addr);
3280        MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
3281                       efx->vport_id);
3282        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
3283                                sizeof(inbuf), NULL, 0, NULL);
3284
3285        efx_ef10_filter_table_probe(efx);
3286        up_write(&efx->filter_sem);
3287        mutex_unlock(&efx->mac_lock);
3288
3289        if (was_enabled)
3290                efx_net_open(efx->net_dev);
3291        efx_device_attach_if_not_resetting(efx);
3292
3293#ifdef CONFIG_SFC_SRIOV
3294        if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
3295                struct efx_ef10_nic_data *nic_data = efx->nic_data;
3296                struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
3297
3298                if (rc == -EPERM) {
3299                        struct efx_nic *efx_pf;
3300
3301                        /* Switch to PF and change MAC address on vport */
3302                        efx_pf = pci_get_drvdata(pci_dev_pf);
3303
3304                        rc = efx_ef10_sriov_set_vf_mac(efx_pf,
3305                                                       nic_data->vf_index,
3306                                                       efx->net_dev->dev_addr);
3307                } else if (!rc) {
3308                        struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
3309                        struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
3310                        unsigned int i;
3311
3312                        /* MAC address successfully changed by VF (with MAC
3313                         * spoofing) so update the parent PF if possible.
3314                         */
3315                        for (i = 0; i < efx_pf->vf_count; ++i) {
3316                                struct ef10_vf *vf = nic_data->vf + i;
3317
3318                                if (vf->efx == efx) {
3319                                        ether_addr_copy(vf->mac,
3320                                                        efx->net_dev->dev_addr);
3321                                        return 0;
3322                                }
3323                        }
3324                }
3325        } else
3326#endif
3327        if (rc == -EPERM) {
3328                netif_err(efx, drv, efx->net_dev,
3329                          "Cannot change MAC address; use sfboot to enable"
3330                          " mac-spoofing on this interface\n");
3331        } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
3332                /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
3333                 * fall-back to the method of changing the MAC address on the
3334                 * vport.  This only applies to PFs because such versions of
3335                 * MCFW do not support VFs.
3336                 */
3337                rc = efx_ef10_vport_set_mac_address(efx);
3338        } else if (rc) {
3339                efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
3340                                       sizeof(inbuf), NULL, 0, rc);
3341        }
3342
3343        return rc;
3344}
3345
3346static int efx_ef10_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
3347{
3348        WARN_ON(!mutex_is_locked(&efx->mac_lock));
3349
3350        efx_mcdi_filter_sync_rx_mode(efx);
3351
3352        if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED))
3353                return efx_mcdi_set_mtu(efx);
3354        return efx_mcdi_set_mac(efx);
3355}
3356
3357static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
3358{
3359        MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
3360
3361        MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
3362        return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
3363                            NULL, 0, NULL);
3364}
3365
3366/* MC BISTs follow a different poll mechanism from PHY BISTs.
3367 * The BIST is done in the poll handler on the MC, and the MCDI command
3368 * will block until the BIST is done.
3369 */
3370static int efx_ef10_poll_bist(struct efx_nic *efx)
3371{
3372        int rc;
3373        MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
3374        size_t outlen;
3375        u32 result;
3376
3377        rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
3378                           outbuf, sizeof(outbuf), &outlen);
3379        if (rc != 0)
3380                return rc;
3381
3382        if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
3383                return -EIO;
3384
3385        result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
3386        switch (result) {
3387        case MC_CMD_POLL_BIST_PASSED:
3388                netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
3389                return 0;
3390        case MC_CMD_POLL_BIST_TIMEOUT:
3391                netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
3392                return -EIO;
3393        case MC_CMD_POLL_BIST_FAILED:
3394                netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
3395                return -EIO;
3396        default:
3397                netif_err(efx, hw, efx->net_dev,
3398                          "BIST returned unknown result %u\n", result);
3399                return -EIO;
3400        }
3401}
3402
3403static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
3404{
3405        int rc;
3406
3407        netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);
3408
3409        rc = efx_ef10_start_bist(efx, bist_type);
3410        if (rc != 0)
3411                return rc;
3412
3413        return efx_ef10_poll_bist(efx);
3414}
3415
3416static int
3417efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
3418{
3419        int rc, rc2;
3420
3421        efx_reset_down(efx, RESET_TYPE_WORLD);
3422
3423        rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
3424                          NULL, 0, NULL, 0, NULL);
3425        if (rc != 0)
3426                goto out;
3427
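            /* Self-test result convention: 1 means passed, -1 failed
             * (0 would mean the test was not run).
             */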
3428        tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
3429        tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;
3430
3431        rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
3432
3433out:
3434        if (rc == -EPERM)
3435                rc = 0;
3436        rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
3437        return rc ? rc : rc2;
3438}
3439
3440#ifdef CONFIG_SFC_MTD
3441
3442struct efx_ef10_nvram_type_info {
3443        u16 type, type_mask;
3444        u8 port;
3445        const char *name;
3446};
3447
3448static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
3449        { NVRAM_PARTITION_TYPE_MC_FIRMWARE,        0,    0, "sfc_mcfw" },
3450        { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0,    0, "sfc_mcfw_backup" },
3451        { NVRAM_PARTITION_TYPE_EXPANSION_ROM,      0,    0, "sfc_exp_rom" },
3452        { NVRAM_PARTITION_TYPE_STATIC_CONFIG,      0,    0, "sfc_static_cfg" },
3453        { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,     0,    0, "sfc_dynamic_cfg" },
3454        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0,   0, "sfc_exp_rom_cfg" },
3455        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,   1, "sfc_exp_rom_cfg" },
3456        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,   2, "sfc_exp_rom_cfg" },
3457        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,   3, "sfc_exp_rom_cfg" },
3458        { NVRAM_PARTITION_TYPE_LICENSE,            0,    0, "sfc_license" },
3459        { NVRAM_PARTITION_TYPE_PHY_MIN,            0xff, 0, "sfc_phy_fw" },
3460        { NVRAM_PARTITION_TYPE_MUM_FIRMWARE,       0,    0, "sfc_mumfw" },
3461        { NVRAM_PARTITION_TYPE_EXPANSION_UEFI,     0,    0, "sfc_uefi" },
3462        { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0,    0, "sfc_dynamic_cfg_dflt" },
3463        { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0,    0, "sfc_exp_rom_cfg_dflt" },
3464        { NVRAM_PARTITION_TYPE_STATUS,             0,    0, "sfc_status" },
3465        { NVRAM_PARTITION_TYPE_BUNDLE,             0,    0, "sfc_bundle" },
3466        { NVRAM_PARTITION_TYPE_BUNDLE_METADATA,    0,    0, "sfc_bundle_metadata" },
3467};
3468#define EF10_NVRAM_PARTITION_COUNT      ARRAY_SIZE(efx_ef10_nvram_types)
3469
3470static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
3471                                        struct efx_mcdi_mtd_partition *part,
3472                                        unsigned int type,
3473                                        unsigned long *found)
3474{
3475        MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
3476        MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
3477        const struct efx_ef10_nvram_type_info *info;
3478        size_t size, erase_size, outlen;
3479        int type_idx = 0;
3480        bool protected;
3481        int rc;
3482
3483        for (type_idx = 0; ; type_idx++) {
3484                if (type_idx == EF10_NVRAM_PARTITION_COUNT)
3485                        return -ENODEV;
3486                info = efx_ef10_nvram_types + type_idx;
3487                if ((type & ~info->type_mask) == info->type)
3488                        break;
3489        }
3490        if (info->port != efx_port_num(efx))
3491                return -ENODEV;
3492
3493        rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
3494        if (rc)
3495                return rc;
3496        if (protected &&
3497            (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS &&
3498             type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS))
3499                /* Hide protected partitions that don't provide defaults. */
3500                return -ENODEV;
3501
3502        if (protected)
3503                /* Protected partitions are read only. */
3504                erase_size = 0;
3505
3506        /* If we've already exposed a partition of this type, hide this
3507         * duplicate.  All operations on MTDs are keyed by the type anyway,
3508         * so we can't act on the duplicate.
3509         */
3510        if (__test_and_set_bit(type_idx, found))
3511                return -EEXIST;
3512
3513        part->nvram_type = type;
3514
3515        MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
3516        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
3517                          outbuf, sizeof(outbuf), &outlen);
3518        if (rc)
3519                return rc;
3520        if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
3521                return -EIO;
3522        if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
3523            (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
3524                part->fw_subtype = MCDI_DWORD(outbuf,
3525                                              NVRAM_METADATA_OUT_SUBTYPE);
3526
3527        part->common.dev_type_name = "EF10 NVRAM manager";
3528        part->common.type_name = info->name;
3529
3530        part->common.mtd.type = MTD_NORFLASH;
3531        part->common.mtd.flags = MTD_CAP_NORFLASH;
3532        part->common.mtd.size = size;
3533        part->common.mtd.erasesize = erase_size;
3534        /* sfc_status and protected partitions have no erase size and are read-only */
3535        if (!erase_size)
3536                part->common.mtd.flags |= MTD_NO_ERASE;
3537
3538        return 0;
3539}
3540
3541static int efx_ef10_mtd_probe(struct efx_nic *efx)
3542{
3543        MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
3544        DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
3545        struct efx_mcdi_mtd_partition *parts;
3546        size_t outlen, n_parts_total, i, n_parts;
3547        unsigned int type;
3548        int rc;
3549
3550        ASSERT_RTNL();
3551
3552        BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
3553        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
3554                          outbuf, sizeof(outbuf), &outlen);
3555        if (rc)
3556                return rc;
3557        if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
3558                return -EIO;
3559
3560        n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
3561        if (n_parts_total >
3562            MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
3563                return -EIO;
3564
3565        parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
3566        if (!parts)
3567                return -ENOMEM;
3568
3569        n_parts = 0;
3570        for (i = 0; i < n_parts_total; i++) {
3571                type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
3572                                        i);
3573                rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
3574                                                  found);
3575                if (rc == -EEXIST || rc == -ENODEV)
3576                        continue;
3577                if (rc)
3578                        goto fail;
3579                n_parts++;
3580        }
3581
3582        rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
3583fail:
3584        if (rc)
3585                kfree(parts);
3586        return rc;
3587}
3588
3589#endif /* CONFIG_SFC_MTD */
3590
3591static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
3592{
3593        _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
3594}
3595
3596static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
3597                                            u32 host_time) {}
3598
3599static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
3600                                           bool temp)
3601{
3602        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
3603        int rc;
3604
3605        if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
3606            channel->sync_events_state == SYNC_EVENTS_VALID ||
3607            (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
3608                return 0;
3609        channel->sync_events_state = SYNC_EVENTS_REQUESTED;
3610
3611        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
3612        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
3613        MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
3614                       channel->channel);
3615
3616        rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
3617                          inbuf, sizeof(inbuf), NULL, 0, NULL);
3618
3619        if (rc != 0)
3620                channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
3621                                                    SYNC_EVENTS_DISABLED;
3622
3623        return rc;
3624}
3625
3626static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
3627                                            bool temp)
3628{
3629        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
3630        int rc;
3631
3632        if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
3633            (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
3634                return 0;
3635        if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
3636                channel->sync_events_state = SYNC_EVENTS_DISABLED;
3637                return 0;
3638        }
3639        channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
3640                                            SYNC_EVENTS_DISABLED;
3641
3642        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
3643        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
3644        MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
3645                       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
3646        MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
3647                       channel->channel);
3648
3649        rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
3650                          inbuf, sizeof(inbuf), NULL, 0, NULL);
3651
3652        return rc;
3653}
3654
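    /* Enable or disable PTP sync events on the PTP channel, if any.
     * @temp selects the temporary form used around resets: instead of
     * being fully disabled, channels are parked in SYNC_EVENTS_QUIESCENT
     * so they can be re-enabled afterwards.
     */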
3655static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
3656                                           bool temp)
3657{
3658        int (*set)(struct efx_channel *channel, bool temp);
3659        struct efx_channel *channel;
3660
3661        set = en ?
3662              efx_ef10_rx_enable_timestamping :
3663              efx_ef10_rx_disable_timestamping;
3664
3665        channel = efx_ptp_channel(efx);
3666        if (channel) {
3667                int rc = set(channel, temp);
3668                if (en && rc != 0) {
3669                        efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
3670                        return rc;
3671                }
3672        }
3673
3674        return 0;
3675}
3676
3677static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
3678                                         struct hwtstamp_config *init)
3679{
3680        return -EOPNOTSUPP;
3681}
3682
3683static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
3684                                      struct hwtstamp_config *init)
3685{
3686        int rc;
3687
3688        switch (init->rx_filter) {
3689        case HWTSTAMP_FILTER_NONE:
3690                efx_ef10_ptp_set_ts_sync_events(efx, false, false);
3691                /* if TX timestamping is still requested then leave PTP on */
3692                return efx_ptp_change_mode(efx,
3693                                           init->tx_type != HWTSTAMP_TX_OFF, 0);
3694        case HWTSTAMP_FILTER_ALL:
3695        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3696        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3697        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3698        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3699        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3700        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3701        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3702        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3703        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3704        case HWTSTAMP_FILTER_PTP_V2_EVENT:
3705        case HWTSTAMP_FILTER_PTP_V2_SYNC:
3706        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3707        case HWTSTAMP_FILTER_NTP_ALL:
3708                init->rx_filter = HWTSTAMP_FILTER_ALL;
3709                rc = efx_ptp_change_mode(efx, true, 0);
3710                if (!rc)
3711                        rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
3712                if (rc)
3713                        efx_ptp_change_mode(efx, false, 0);
3714                return rc;
3715        default:
3716                return -ERANGE;
3717        }
3718}
3719
3720static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
3721                                     struct netdev_phys_item_id *ppid)
3722{
3723        struct efx_ef10_nic_data *nic_data = efx->nic_data;
3724
3725        if (!is_valid_ether_addr(nic_data->port_id))
3726                return -EOPNOTSUPP;
3727
3728        ppid->id_len = ETH_ALEN;
3729        memcpy(ppid->id, nic_data->port_id, ppid->id_len);
3730
3731        return 0;
3732}
3733
3734static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
3735{
3736        if (proto != htons(ETH_P_8021Q))
3737                return -EINVAL;
3738
3739        return efx_ef10_add_vlan(efx, vid);
3740}
3741
3742static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
3743{
3744        if (proto != htons(ETH_P_8021Q))
3745                return -EINVAL;
3746
3747        return efx_ef10_del_vlan(efx, vid);
3748}
3749
3750/* We rely on the MCDI wiping out our TX rings if it made any changes to the
3751 * ports table, ensuring that any TSO descriptors that were made on a now-
3752 * removed tunnel port will be blown away and won't break things when we try
3753 * to transmit them using the new ports table.
3754 */
3755static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
3756{
3757        struct efx_ef10_nic_data *nic_data = efx->nic_data;
3758        MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
3759        MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
3760        bool will_reset = false;
3761        size_t num_entries = 0;
3762        size_t inlen, outlen;
3763        size_t i;
3764        int rc;
3765        efx_dword_t flags_and_num_entries;
3766
3767        WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));
3768
3769        nic_data->udp_tunnels_dirty = false;
3770
3771        if (!(nic_data->datapath_caps &
3772            (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
3773                efx_device_attach_if_not_resetting(efx);
3774                return 0;
3775        }
3776
3777        BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
3778                     MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
3779
3780        for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
3781                if (nic_data->udp_tunnels[i].type !=
3782                    TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID) {
3783                        efx_dword_t entry;
3784
3785                        EFX_POPULATE_DWORD_2(entry,
3786                                TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
3787                                        ntohs(nic_data->udp_tunnels[i].port),
3788                                TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
3789                                        nic_data->udp_tunnels[i].type);
3790                        *_MCDI_ARRAY_DWORD(inbuf,
3791                                SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
3792                                num_entries++) = entry;
3793                }
3794        }
3795
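            /* FLAGS and NUM_ENTRIES are adjacent 16-bit quantities sharing
             * one dword, so build them together; the BUILD_BUG_ONs below
             * check that NUM_ENTRIES really occupies the upper word
             * (EFX_WORD_1) relative to FLAGS.
             */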
3796        BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
3797                      MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
3798                     EFX_WORD_1_LBN);
3799        BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
3800                     EFX_WORD_1_WIDTH);
3801        EFX_POPULATE_DWORD_2(flags_and_num_entries,
3802                             MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
3803                                !!unloading,
3804                             EFX_WORD_1, num_entries);
3805        *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
3806                flags_and_num_entries;
3807
3808        inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);
3809
3810        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
3811                                inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
3812        if (rc == -EIO) {
3813                /* Most likely the MC rebooted due to another function also
3814                 * setting its tunnel port list. Mark the tunnel port list as
3815                 * dirty, so it will be pushed upon coming up from the reboot.
3816                 */
3817                nic_data->udp_tunnels_dirty = true;
3818                return 0;
3819        }
3820
3821        if (rc) {
3822                /* -EPERM is expected on unprivileged functions */
3823                if (rc != -EPERM)
3824                        netif_warn(efx, drv, efx->net_dev,
3825                                   "Unable to set UDP tunnel ports; rc=%d.\n", rc);
3826        } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
3827                   (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
3828                netif_info(efx, drv, efx->net_dev,
3829                           "Rebooting MC due to UDP tunnel port list change\n");
3830                will_reset = true;
3831                if (unloading)
3832                        /* Delay for the MC reset to complete. This will make
3833                         * unloading other functions a bit smoother. This is a
3834                         * race, but the other unload will work whichever way
3835                         * it goes; this just avoids an unnecessary error
3836                         * message.
3837                         */
3838                        msleep(100);
3839        }
3840        if (!will_reset && !unloading) {
3841                /* The caller will have detached, relying on the MC reset to
3842                 * trigger a re-attach.  Since there won't be an MC reset, we
3843                 * have to do the attach ourselves.
3844                 */
3845                efx_device_attach_if_not_resetting(efx);
3846        }
3847
3848        return rc;
3849}
3850
3851static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
3852{
3853        struct efx_ef10_nic_data *nic_data = efx->nic_data;
3854        int rc = 0;
3855
3856        mutex_lock(&nic_data->udp_tunnels_lock);
3857        if (nic_data->udp_tunnels_dirty) {
3858                /* Make sure all TX queues are stopped while we modify the
3859                 * table, else we might race against an efx_features_check().
3860                 */
3861                efx_device_detach_sync(efx);
3862                rc = efx_ef10_set_udp_tnl_ports(efx, false);
3863        }
3864        mutex_unlock(&nic_data->udp_tunnels_lock);
3865        return rc;
3866}
3867
3868static int efx_ef10_udp_tnl_set_port(struct net_device *dev,
3869                                     unsigned int table, unsigned int entry,
3870                                     struct udp_tunnel_info *ti)
3871{
3872        struct efx_nic *efx = netdev_priv(dev);
3873        struct efx_ef10_nic_data *nic_data;
3874        int efx_tunnel_type, rc;
3875
3876        if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
3877                efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
3878        else
3879                efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
3880
3881        nic_data = efx->nic_data;
3882        if (!(nic_data->datapath_caps &
3883              (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
3884                return -EOPNOTSUPP;
3885
3886        mutex_lock(&nic_data->udp_tunnels_lock);
3887        /* Make sure all TX queues are stopped while we add to the table,
3888         * else we might race against an efx_features_check().
3889         */
3890        efx_device_detach_sync(efx);
3891        nic_data->udp_tunnels[entry].type = efx_tunnel_type;
3892        nic_data->udp_tunnels[entry].port = ti->port;
3893        rc = efx_ef10_set_udp_tnl_ports(efx, false);
3894        mutex_unlock(&nic_data->udp_tunnels_lock);
3895
3896        return rc;
3897}
3898
3899/* Called under the TX lock with the TX queue running, hence no-one can be
3900 * in the middle of updating the UDP tunnels table.  However, they could
3901 * have tried and failed the MCDI, in which case they'll have set the dirty
3902 * flag before dropping their locks.
3903 */
3904static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
3905{
3906        struct efx_ef10_nic_data *nic_data = efx->nic_data;
3907        size_t i;
3908
3909        if (!(nic_data->datapath_caps &
3910              (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
3911                return false;
3912
3913        if (nic_data->udp_tunnels_dirty)
3914                /* SW table may not match HW state, so just assume we can't
3915                 * use any UDP tunnel offloads.
3916                 */
3917                return false;
3918
3919        for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
3920                if (nic_data->udp_tunnels[i].type !=
3921                    TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID &&
3922                    nic_data->udp_tunnels[i].port == port)
3923                        return true;
3924
3925        return false;
3926}
3927
static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
                                       unsigned int table, unsigned int entry,
                                       struct udp_tunnel_info *ti)
{
        struct efx_nic *efx = netdev_priv(dev);
        struct efx_ef10_nic_data *nic_data;
        int rc;

        nic_data = efx->nic_data;

        mutex_lock(&nic_data->udp_tunnels_lock);
        /* Make sure all TX are stopped while we remove from the table, else we
         * might race against an efx_features_check().
         */
        efx_device_detach_sync(efx);
        nic_data->udp_tunnels[entry].type = TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID;
        nic_data->udp_tunnels[entry].port = 0;
        rc = efx_ef10_set_udp_tnl_ports(efx, false);
        mutex_unlock(&nic_data->udp_tunnels_lock);

        return rc;
}

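/* A single table shared by VXLAN and GENEVE: the hardware does not
 * dedicate slots to a tunnel type, so each entry records its own type in
 * nic_data->udp_tunnels[] instead.  MAY_SLEEP is required because the
 * callbacks above issue MCDI requests, which can sleep.
 */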
static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = {
        .set_port       = efx_ef10_udp_tnl_set_port,
        .unset_port     = efx_ef10_udp_tnl_unset_port,
        .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
        .tables         = {
                {
                        .n_entries = 16,
                        .tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
                                        UDP_TUNNEL_TYPE_GENEVE,
                },
        },
};
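/* efx_ef10_udp_tunnels is referenced earlier in this file (hence the
 * forward declaration near the top), where it is wired up to
 * net_dev->udp_tunnel_nic_info in the probe path; the set_port and
 * has_port paths above additionally check the VXLAN_NVGRE datapath
 * capability at run time.
 */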
/* EF10 may have multiple datapath firmware variants within a
 * single version.  Report which variants are running, as a suffix
 * appended to the firmware version string exposed through ethtool.
 */
static size_t efx_ef10_print_additional_fwver(struct efx_nic *efx, char *buf,
                                              size_t len)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;

        return scnprintf(buf, len, " rx%x tx%x",
                         nic_data->rx_dpcpu_fw_id,
                         nic_data->tx_dpcpu_fw_id);
}

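/* Look up a single datapath capability flag, identified by the (offset,
 * bit) pair it occupies in the MC_CMD_GET_CAPABILITIES response.  Callers
 * typically use a convenience wrapper (such as the efx_has_cap() macro)
 * rather than calling this method directly; offsets other than the two
 * FLAGS words are treated as "capability not present".
 */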
static unsigned int ef10_check_caps(const struct efx_nic *efx,
                                    u8 flag,
                                    u32 offset)
{
        const struct efx_ef10_nic_data *nic_data = efx->nic_data;

        switch (offset) {
        case MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST:
                return nic_data->datapath_caps & BIT_ULL(flag);
        case MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST:
                return nic_data->datapath_caps2 & BIT_ULL(flag);
        default:
                return 0;
        }
}

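/* Offload features common to the PF and VF variants below; both expose
 * them through their .offload_features field.
 */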
#define EF10_OFFLOAD_FEATURES           \
        (NETIF_F_IP_CSUM |              \
         NETIF_F_HW_VLAN_CTAG_FILTER |  \
         NETIF_F_IPV6_CSUM |            \
         NETIF_F_RXHASH |               \
         NETIF_F_NTUPLE)

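/* Method table for Huntington (EF10) virtual functions.  VFs have no
 * access to MAC statistics control, WoL, NVRAM/MTD or chip selftest, so
 * those hooks are VF-specific variants or dummy ops.
 */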
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .is_vf = true,
        .mem_bar = efx_ef10_vf_mem_bar,
        .mem_map_size = efx_ef10_mem_map_size,
        .probe = efx_ef10_probe_vf,
        .remove = efx_ef10_remove,
        .dimension_resources = efx_ef10_dimension_resources,
        .init = efx_ef10_init_nic,
        .fini = efx_ef10_fini_nic,
        .map_reset_reason = efx_ef10_map_reset_reason,
        .map_reset_flags = efx_ef10_map_reset_flags,
        .reset = efx_ef10_reset,
        .probe_port = efx_mcdi_port_probe,
        .remove_port = efx_mcdi_port_remove,
        .fini_dmaq = efx_fini_dmaq,
        .prepare_flr = efx_ef10_prepare_flr,
        .finish_flr = efx_port_dummy_op_void,
        .describe_stats = efx_ef10_describe_stats,
        .update_stats = efx_ef10_update_stats_vf,
        .update_stats_atomic = efx_ef10_update_stats_atomic_vf,
        .start_stats = efx_port_dummy_op_void,
        .pull_stats = efx_port_dummy_op_void,
        .stop_stats = efx_port_dummy_op_void,
        .push_irq_moderation = efx_ef10_push_irq_moderation,
        .reconfigure_mac = efx_ef10_mac_reconfigure,
        .check_mac_fault = efx_mcdi_mac_check_fault,
        .reconfigure_port = efx_mcdi_port_reconfigure,
        .get_wol = efx_ef10_get_wol_vf,
        .set_wol = efx_ef10_set_wol_vf,
        .resume_wol = efx_port_dummy_op_void,
        .mcdi_request = efx_ef10_mcdi_request,
        .mcdi_poll_response = efx_ef10_mcdi_poll_response,
        .mcdi_read_response = efx_ef10_mcdi_read_response,
        .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
        .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
        .irq_enable_master = efx_port_dummy_op_void,
        .irq_test_generate = efx_ef10_irq_test_generate,
        .irq_disable_non_ev = efx_port_dummy_op_void,
        .irq_handle_msi = efx_ef10_msi_interrupt,
        .irq_handle_legacy = efx_ef10_legacy_interrupt,
        .tx_probe = efx_ef10_tx_probe,
        .tx_init = efx_ef10_tx_init,
        .tx_remove = efx_mcdi_tx_remove,
        .tx_write = efx_ef10_tx_write,
        .tx_limit_len = efx_ef10_tx_limit_len,
        .tx_enqueue = __efx_enqueue_skb,
        .rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config,
        .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
        .rx_probe = efx_mcdi_rx_probe,
        .rx_init = efx_mcdi_rx_init,
        .rx_remove = efx_mcdi_rx_remove,
        .rx_write = efx_ef10_rx_write,
        .rx_defer_refill = efx_ef10_rx_defer_refill,
        .rx_packet = __efx_rx_packet,
        .ev_probe = efx_mcdi_ev_probe,
        .ev_init = efx_ef10_ev_init,
        .ev_fini = efx_mcdi_ev_fini,
        .ev_remove = efx_mcdi_ev_remove,
        .ev_process = efx_ef10_ev_process,
        .ev_read_ack = efx_ef10_ev_read_ack,
        .ev_test_generate = efx_ef10_ev_test_generate,
        .filter_table_probe = efx_ef10_filter_table_probe,
        .filter_table_restore = efx_mcdi_filter_table_restore,
        .filter_table_remove = efx_mcdi_filter_table_remove,
        .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
        .filter_insert = efx_mcdi_filter_insert,
        .filter_remove_safe = efx_mcdi_filter_remove_safe,
        .filter_get_safe = efx_mcdi_filter_get_safe,
        .filter_clear_rx = efx_mcdi_filter_clear_rx,
        .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
        .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
        .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
        .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
        .mtd_probe = efx_port_dummy_op_int,
#endif
        .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
        .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
        .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
        .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
        .vswitching_probe = efx_ef10_vswitching_probe_vf,
        .vswitching_restore = efx_ef10_vswitching_restore_vf,
        .vswitching_remove = efx_ef10_vswitching_remove_vf,
#endif
        .get_mac_address = efx_ef10_get_mac_address_vf,
        .set_mac_address = efx_ef10_set_mac_address,

        .get_phys_port_id = efx_ef10_get_phys_port_id,
        .revision = EFX_REV_HUNT_A0,
        .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
        .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
        .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
        .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
        .can_rx_scatter = true,
        .always_rx_scatter = true,
        .min_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
        .offload_features = EF10_OFFLOAD_FEATURES,
        .mcdi_max_ver = 2,
        .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
        .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE) |
                            (1 << HWTSTAMP_FILTER_ALL),
        .rx_hash_key_size = 40,
        .check_caps = ef10_check_caps,
        .print_additional_fwver = efx_ef10_print_additional_fwver,
        .sensor_event = efx_mcdi_sensor_event,
};

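/* Method table for Huntington (EF10) physical functions.  Relative to the
 * VF variant above, this adds MAC statistics control, chip and NVRAM
 * selftests, full MTD access, PTP sync events, custom RSS contexts,
 * SR-IOV management and UDP tunnel offload control.
 */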
const struct efx_nic_type efx_hunt_a0_nic_type = {
        .is_vf = false,
        .mem_bar = efx_ef10_pf_mem_bar,
        .mem_map_size = efx_ef10_mem_map_size,
        .probe = efx_ef10_probe_pf,
        .remove = efx_ef10_remove,
        .dimension_resources = efx_ef10_dimension_resources,
        .init = efx_ef10_init_nic,
        .fini = efx_ef10_fini_nic,
        .map_reset_reason = efx_ef10_map_reset_reason,
        .map_reset_flags = efx_ef10_map_reset_flags,
        .reset = efx_ef10_reset,
        .probe_port = efx_mcdi_port_probe,
        .remove_port = efx_mcdi_port_remove,
        .fini_dmaq = efx_fini_dmaq,
        .prepare_flr = efx_ef10_prepare_flr,
        .finish_flr = efx_port_dummy_op_void,
        .describe_stats = efx_ef10_describe_stats,
        .update_stats = efx_ef10_update_stats_pf,
        .start_stats = efx_mcdi_mac_start_stats,
        .pull_stats = efx_mcdi_mac_pull_stats,
        .stop_stats = efx_mcdi_mac_stop_stats,
        .push_irq_moderation = efx_ef10_push_irq_moderation,
        .reconfigure_mac = efx_ef10_mac_reconfigure,
        .check_mac_fault = efx_mcdi_mac_check_fault,
        .reconfigure_port = efx_mcdi_port_reconfigure,
        .get_wol = efx_ef10_get_wol,
        .set_wol = efx_ef10_set_wol,
        .resume_wol = efx_port_dummy_op_void,
        .get_fec_stats = efx_ef10_get_fec_stats,
        .test_chip = efx_ef10_test_chip,
        .test_nvram = efx_mcdi_nvram_test_all,
        .mcdi_request = efx_ef10_mcdi_request,
        .mcdi_poll_response = efx_ef10_mcdi_poll_response,
        .mcdi_read_response = efx_ef10_mcdi_read_response,
        .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
        .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
        .irq_enable_master = efx_port_dummy_op_void,
        .irq_test_generate = efx_ef10_irq_test_generate,
        .irq_disable_non_ev = efx_port_dummy_op_void,
        .irq_handle_msi = efx_ef10_msi_interrupt,
        .irq_handle_legacy = efx_ef10_legacy_interrupt,
        .tx_probe = efx_ef10_tx_probe,
        .tx_init = efx_ef10_tx_init,
        .tx_remove = efx_mcdi_tx_remove,
        .tx_write = efx_ef10_tx_write,
        .tx_limit_len = efx_ef10_tx_limit_len,
        .tx_enqueue = __efx_enqueue_skb,
        .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
        .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
        .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
        .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
        .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
        .rx_probe = efx_mcdi_rx_probe,
        .rx_init = efx_mcdi_rx_init,
        .rx_remove = efx_mcdi_rx_remove,
        .rx_write = efx_ef10_rx_write,
        .rx_defer_refill = efx_ef10_rx_defer_refill,
        .rx_packet = __efx_rx_packet,
        .ev_probe = efx_mcdi_ev_probe,
        .ev_init = efx_ef10_ev_init,
        .ev_fini = efx_mcdi_ev_fini,
        .ev_remove = efx_mcdi_ev_remove,
        .ev_process = efx_ef10_ev_process,
        .ev_read_ack = efx_ef10_ev_read_ack,
        .ev_test_generate = efx_ef10_ev_test_generate,
        .filter_table_probe = efx_ef10_filter_table_probe,
        .filter_table_restore = efx_mcdi_filter_table_restore,
        .filter_table_remove = efx_mcdi_filter_table_remove,
        .filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
        .filter_insert = efx_mcdi_filter_insert,
        .filter_remove_safe = efx_mcdi_filter_remove_safe,
        .filter_get_safe = efx_mcdi_filter_get_safe,
        .filter_clear_rx = efx_mcdi_filter_clear_rx,
        .filter_count_rx_used = efx_mcdi_filter_count_rx_used,
        .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
        .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
        .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
        .mtd_probe = efx_ef10_mtd_probe,
        .mtd_rename = efx_mcdi_mtd_rename,
        .mtd_read = efx_mcdi_mtd_read,
        .mtd_erase = efx_mcdi_mtd_erase,
        .mtd_write = efx_mcdi_mtd_write,
        .mtd_sync = efx_mcdi_mtd_sync,
#endif
        .ptp_write_host_time = efx_ef10_ptp_write_host_time,
        .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
        .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
        .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
        .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
        .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
        .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
#ifdef CONFIG_SFC_SRIOV
        .sriov_configure = efx_ef10_sriov_configure,
        .sriov_init = efx_ef10_sriov_init,
        .sriov_fini = efx_ef10_sriov_fini,
        .sriov_wanted = efx_ef10_sriov_wanted,
        .sriov_reset = efx_ef10_sriov_reset,
        .sriov_flr = efx_ef10_sriov_flr,
        .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
        .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
        .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
        .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
        .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
        .vswitching_probe = efx_ef10_vswitching_probe_pf,
        .vswitching_restore = efx_ef10_vswitching_restore_pf,
        .vswitching_remove = efx_ef10_vswitching_remove_pf,
#endif
        .get_mac_address = efx_ef10_get_mac_address_pf,
        .set_mac_address = efx_ef10_set_mac_address,
        .tso_versions = efx_ef10_tso_versions,

        .get_phys_port_id = efx_ef10_get_phys_port_id,
        .revision = EFX_REV_HUNT_A0,
        .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
        .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
        .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
        .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
        .can_rx_scatter = true,
        .always_rx_scatter = true,
        .option_descriptors = true,
        .min_interrupt_mode = EFX_INT_MODE_LEGACY,
        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
        .offload_features = EF10_OFFLOAD_FEATURES,
        .mcdi_max_ver = 2,
        .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
        .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE) |
                            (1 << HWTSTAMP_FILTER_ALL),
        .rx_hash_key_size = 40,
        .check_caps = ef10_check_caps,
        .print_additional_fwver = efx_ef10_print_additional_fwver,
        .sensor_event = efx_mcdi_sensor_event,
};