linux/drivers/net/ethernet/sfc/farch.c
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "rx_common.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

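/* Illustrative example (not part of the original driver): the *_ORDER
 * values appear to encode the cache size as 8 << ORDER descriptors,
 * so 8 << 1 == 16 TX entries and 8 << 3 == 64 RX entries.
 */
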
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST         0x000101
#define _EFX_CHANNEL_MAGIC_FILL         0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN     0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN     0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)        ((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)         ((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)                                \
        _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)                               \
        _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,                     \
                           efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)                           \
        _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,                 \
                           efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)                           \
        _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,                 \
                           (_tx_queue)->queue)

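/* Illustrative example (not part of the original driver): a test event
 * for channel 3 encodes as (0x000101 << 8) | 3 == 0x00010103.
 * _EFX_CHANNEL_MAGIC_CODE() recovers the code 0x000101 by shifting
 * right by 8; the low 8 bits carry the channel or queue index.
 */
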
static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
                                     unsigned int index)
{
        efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
                        value, index);
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
                                     const efx_oword_t *mask)
{
        return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
                ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_farch_test_registers(struct efx_nic *efx,
                             const struct efx_farch_register_test *regs,
                             size_t n_regs)
{
        unsigned address = 0;
        int i, j;
        efx_oword_t mask, imask, original, reg, buf;

        for (i = 0; i < n_regs; ++i) {
                address = regs[i].address;
                mask = imask = regs[i].mask;
                EFX_INVERT_OWORD(imask);

                efx_reado(efx, &original, address);

                /* bit sweep on and off */
                for (j = 0; j < 128; j++) {
                        if (!EFX_EXTRACT_OWORD32(mask, j, j))
                                continue;

                        /* Test this testable bit can be set in isolation */
                        EFX_AND_OWORD(reg, original, mask);
                        EFX_SET_OWORD32(reg, j, j, 1);

                        efx_writeo(efx, &reg, address);
                        efx_reado(efx, &buf, address);

                        if (efx_masked_compare_oword(&reg, &buf, &mask))
                                goto fail;

                        /* Test this testable bit can be cleared in isolation */
                        EFX_OR_OWORD(reg, original, mask);
                        EFX_SET_OWORD32(reg, j, j, 0);

                        efx_writeo(efx, &reg, address);
                        efx_reado(efx, &buf, address);

                        if (efx_masked_compare_oword(&reg, &buf, &mask))
                                goto fail;
                }

                efx_writeo(efx, &original, address);
        }

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev,
                  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
                  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
                  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
        return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
        efx_qword_t buf_desc;
        unsigned int index;
        dma_addr_t dma_addr;
        int i;

        EFX_WARN_ON_PARANOID(!buffer->buf.addr);

        /* Write buffer descriptors to NIC */
        for (i = 0; i < buffer->entries; i++) {
                index = buffer->index + i;
                dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
                netif_dbg(efx, probe, efx->net_dev,
                          "mapping special buffer %d at %llx\n",
                          index, (unsigned long long)dma_addr);
                EFX_POPULATE_QWORD_3(buf_desc,
                                     FRF_AZ_BUF_ADR_REGION, 0,
                                     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
                                     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
                efx_write_buf_tbl(efx, &buf_desc, index);
        }
}

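/* Illustrative example (not part of the original driver): each buffer
 * table entry maps one 4KB page, with FRF_AZ_BUF_ADR_FBUF holding
 * dma_addr >> 12, i.e. the 4KB page number of that chunk.  A 32KB
 * event queue therefore occupies eight consecutive entries.
 */
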
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
        efx_oword_t buf_tbl_upd;
        unsigned int start = buffer->index;
        unsigned int end = (buffer->index + buffer->entries - 1);

        if (!buffer->entries)
                return;

        netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
                  buffer->index, buffer->index + buffer->entries - 1);

        EFX_POPULATE_OWORD_4(buf_tbl_upd,
                             FRF_AZ_BUF_UPD_CMD, 0,
                             FRF_AZ_BUF_CLR_CMD, 1,
                             FRF_AZ_BUF_CLR_END_ID, end,
                             FRF_AZ_BUF_CLR_START_ID, start);
        efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
                                    struct efx_special_buffer *buffer,
                                    unsigned int len)
{
#ifdef CONFIG_SFC_SRIOV
        struct siena_nic_data *nic_data = efx->nic_data;
#endif
        len = ALIGN(len, EFX_BUF_SIZE);

        if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
                return -ENOMEM;
        buffer->entries = len / EFX_BUF_SIZE;
        BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

        /* Select new buffer ID */
        buffer->index = efx->next_buffer_table;
        efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
        BUG_ON(efx_siena_sriov_enabled(efx) &&
               nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif

        netif_dbg(efx, probe, efx->net_dev,
                  "allocating special buffers %d-%d at %llx+%x "
                  "(virt %p phys %llx)\n", buffer->index,
                  buffer->index + buffer->entries - 1,
                  (u64)buffer->buf.dma_addr, len,
                  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

        return 0;
}
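
/* Illustrative example (not part of the original driver): a 512-entry
 * TX ring needs 512 * sizeof(efx_qword_t) == 4096 bytes, which ALIGN()
 * leaves at one EFX_BUF_SIZE page, so buffer->entries == 1 and a single
 * buffer ID is consumed from efx->next_buffer_table.
 */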

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
        if (!buffer->buf.addr)
                return;

        netif_dbg(efx, hw, efx->net_dev,
                  "deallocating special buffers %d-%d at %llx+%x "
                  "(virt %p phys %llx)\n", buffer->index,
                  buffer->index + buffer->entries - 1,
                  (u64)buffer->buf.dma_addr, buffer->buf.len,
                  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

        efx_nic_free_buffer(efx, &buffer->buf);
        buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
        unsigned write_ptr;
        efx_dword_t reg;

        write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
        EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
        efx_writed_page(tx_queue->efx, &reg,
                        FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
                                          const efx_qword_t *txd)
{
        unsigned write_ptr;
        efx_oword_t reg;

        BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
        BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

        write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
        EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
                             FRF_AZ_TX_DESC_WPTR, write_ptr);
        reg.qword[0] = *txd;
        efx_writeo_page(tx_queue->efx, &reg,
                        FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;
        efx_qword_t *txd;
        unsigned write_ptr;
        unsigned old_write_count = tx_queue->write_count;

        tx_queue->xmit_pending = false;
        if (unlikely(tx_queue->write_count == tx_queue->insert_count))
                return;

        do {
                write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[write_ptr];
                txd = efx_tx_desc(tx_queue, write_ptr);
                ++tx_queue->write_count;

                EFX_WARN_ON_ONCE_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

                /* Create TX descriptor ring entry */
                BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
                EFX_POPULATE_QWORD_4(*txd,
                                     FSF_AZ_TX_KER_CONT,
                                     buffer->flags & EFX_TX_BUF_CONT,
                                     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
                                     FSF_AZ_TX_KER_BUF_REGION, 0,
                                     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
        } while (tx_queue->write_count != tx_queue->insert_count);

        wmb(); /* Ensure descriptors are written before they are fetched */

        if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
                txd = efx_tx_desc(tx_queue,
                                  old_write_count & tx_queue->ptr_mask);
                efx_farch_push_tx_desc(tx_queue, txd);
                ++tx_queue->pushes;
        } else {
                efx_farch_notify_tx_desc(tx_queue);
        }
}
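
/* Illustrative note (not part of the original driver): when
 * efx_nic_may_push_tx_desc() allows it, the first new descriptor is
 * written inline with the write pointer in a single
 * FR_BZ_TX_DESC_UPD_P0 access, saving the NIC a separate descriptor
 * fetch from host memory; otherwise only the doorbell is rung via
 * efx_farch_notify_tx_desc().
 */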

unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
                                    dma_addr_t dma_addr, unsigned int len)
{
        /* Don't cross 4K boundaries with descriptors. */
        unsigned int limit = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;

        len = min(limit, len);

        return len;
}

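/* Illustrative example (not part of the original driver), assuming
 * EFX_PAGE_SIZE == 4096 per the comment above: a fragment starting at a
 * DMA address with low bits 0xff0 has limit = (~0xff0 & 0xfff) + 1 ==
 * 16 bytes before the 4K boundary, so a 64-byte fragment is emitted as
 * a 16-byte descriptor followed by a 48-byte one.
 */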

/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned entries;

        tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) |
                         ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
        entries = tx_queue->ptr_mask + 1;
        return efx_alloc_special_buffer(efx, &tx_queue->txd,
                                        entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
        int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
        struct efx_nic *efx = tx_queue->efx;
        efx_oword_t reg;

        /* Pin TX descriptor ring */
        efx_init_special_buffer(efx, &tx_queue->txd);

        /* Push TX descriptor ring to card */
        EFX_POPULATE_OWORD_10(reg,
                              FRF_AZ_TX_DESCQ_EN, 1,
                              FRF_AZ_TX_ISCSI_DDIG_EN, 0,
                              FRF_AZ_TX_ISCSI_HDIG_EN, 0,
                              FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
                              FRF_AZ_TX_DESCQ_EVQ_ID,
                              tx_queue->channel->channel,
                              FRF_AZ_TX_DESCQ_OWNER_ID, 0,
                              FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
                              FRF_AZ_TX_DESCQ_SIZE,
                              __ffs(tx_queue->txd.entries),
                              FRF_AZ_TX_DESCQ_TYPE, 0,
                              FRF_BZ_TX_NON_IP_DROP_DIS, 1);

        EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
        EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum);

        efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
                         tx_queue->queue);

        EFX_POPULATE_OWORD_1(reg,
                             FRF_BZ_TX_PACE,
                             (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
                             FFE_BZ_TX_PACE_OFF :
                             FFE_BZ_TX_PACE_RESERVED);
        efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);

        tx_queue->tso_version = 1;
}

static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        efx_oword_t tx_flush_descq;

        WARN_ON(atomic_read(&tx_queue->flush_outstanding));
        atomic_set(&tx_queue->flush_outstanding, 1);

        EFX_POPULATE_OWORD_2(tx_flush_descq,
                             FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
                             FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
        efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        efx_oword_t tx_desc_ptr;

        /* Remove TX descriptor ring from card */
        EFX_ZERO_OWORD(tx_desc_ptr);
        efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
                         tx_queue->queue);

        /* Unpin TX descriptor ring */
        efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
        efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
        struct efx_rx_buffer *rx_buf;
        efx_qword_t *rxd;

        rxd = efx_rx_desc(rx_queue, index);
        rx_buf = efx_rx_buffer(rx_queue, index);
        EFX_POPULATE_QWORD_3(*rxd,
                             FSF_AZ_RX_KER_BUF_SIZE,
                             rx_buf->len -
                             rx_queue->efx->type->rx_buffer_padding,
                             FSF_AZ_RX_KER_BUF_REGION, 0,
                             FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        efx_dword_t reg;
        unsigned write_ptr;

        while (rx_queue->notified_count != rx_queue->added_count) {
                efx_farch_build_rx_desc(
                        rx_queue,
                        rx_queue->notified_count & rx_queue->ptr_mask);
                ++rx_queue->notified_count;
        }

        wmb();
        write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
        EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
        efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
                        efx_rx_queue_index(rx_queue));
}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned entries;

        entries = rx_queue->ptr_mask + 1;
        return efx_alloc_special_buffer(efx, &rx_queue->rxd,
                                        entries * sizeof(efx_qword_t));
}

void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
        efx_oword_t rx_desc_ptr;
        struct efx_nic *efx = rx_queue->efx;
        bool jumbo_en;

        /* For kernel-mode queues in Siena, the JUMBO flag enables scatter. */
        jumbo_en = efx->rx_scatter;

        netif_dbg(efx, hw, efx->net_dev,
                  "RX queue %d ring in special buffers %d-%d\n",
                  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
                  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

        rx_queue->scatter_n = 0;

        /* Pin RX descriptor ring */
        efx_init_special_buffer(efx, &rx_queue->rxd);

        /* Push RX descriptor ring to card */
        EFX_POPULATE_OWORD_10(rx_desc_ptr,
                              FRF_AZ_RX_ISCSI_DDIG_EN, true,
                              FRF_AZ_RX_ISCSI_HDIG_EN, true,
                              FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
                              FRF_AZ_RX_DESCQ_EVQ_ID,
                              efx_rx_queue_channel(rx_queue)->channel,
                              FRF_AZ_RX_DESCQ_OWNER_ID, 0,
                              FRF_AZ_RX_DESCQ_LABEL,
                              efx_rx_queue_index(rx_queue),
                              FRF_AZ_RX_DESCQ_SIZE,
                              __ffs(rx_queue->rxd.entries),
                              FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
                              FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
                              FRF_AZ_RX_DESCQ_EN, 1);
        efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
                         efx_rx_queue_index(rx_queue));
}

static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        efx_oword_t rx_flush_descq;

        EFX_POPULATE_OWORD_2(rx_flush_descq,
                             FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
                             FRF_AZ_RX_FLUSH_DESCQ,
                             efx_rx_queue_index(rx_queue));
        efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
        efx_oword_t rx_desc_ptr;
        struct efx_nic *efx = rx_queue->efx;

        /* Remove RX descriptor ring from card */
        EFX_ZERO_OWORD(rx_desc_ptr);
        efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
                         efx_rx_queue_index(rx_queue));

        /* Unpin RX descriptor ring */
        efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
        efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
        /* Ensure that all updates are visible to efx_farch_flush_queues() */
        smp_mb();

        return (atomic_read(&efx->active_queues) == 0 ||
                (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
                 && atomic_read(&efx->rxq_flush_pending) > 0));
}
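
/* Illustrative example (not part of the original driver): with
 * EFX_RX_FLUSH_COUNT == 4, a waiter wakes either when active_queues
 * reaches zero (everything drained) or when, say, only three RX flushes
 * are outstanding while more are pending, so another flush request can
 * be kicked off immediately.
 */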

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
        bool i = true;
        efx_oword_t txd_ptr_tbl;
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;

        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        efx_reado_table(efx, &txd_ptr_tbl,
                                        FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
                        if (EFX_OWORD_FIELD(txd_ptr_tbl,
                                            FRF_AZ_TX_DESCQ_FLUSH) ||
                            EFX_OWORD_FIELD(txd_ptr_tbl,
                                            FRF_AZ_TX_DESCQ_EN)) {
                                netif_dbg(efx, hw, efx->net_dev,
                                          "flush did not complete on TXQ %d\n",
                                          tx_queue->queue);
                                i = false;
                        } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
                                                  1, 0)) {
                                /* The flush is complete, but we didn't
                                 * receive a flush completion event
                                 */
                                netif_dbg(efx, hw, efx->net_dev,
                                          "flush complete on TXQ %d, so drain "
                                          "the queue\n", tx_queue->queue);
                                /* Don't need to increment active_queues as it
                                 * has already been incremented for the queues
                                 * which did not drain
                                 */
                                efx_farch_magic_event(channel,
                                                      EFX_CHANNEL_MAGIC_TX_DRAIN(
                                                              tx_queue));
                        }
                }
        }

        return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
        unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
        struct efx_channel *channel;
        struct efx_rx_queue *rx_queue;
        struct efx_tx_queue *tx_queue;
        int rc = 0;

        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        efx_farch_flush_tx_queue(tx_queue);
                }
                efx_for_each_channel_rx_queue(rx_queue, channel) {
                        rx_queue->flush_pending = true;
                        atomic_inc(&efx->rxq_flush_pending);
                }
        }

        while (timeout && atomic_read(&efx->active_queues) > 0) {
                /* If SRIOV is enabled, then offload receive queue flushing to
                 * the firmware (though we will still have to poll for
                 * completion). If that fails, fall back to the old scheme.
                 */
                if (efx_siena_sriov_enabled(efx)) {
                        rc = efx_mcdi_flush_rxqs(efx);
                        if (!rc)
                                goto wait;
                }

                /* The hardware supports four concurrent rx flushes, each of
                 * which may need to be retried if there is an outstanding
                 * descriptor fetch
                 */
                efx_for_each_channel(channel, efx) {
                        efx_for_each_channel_rx_queue(rx_queue, channel) {
                                if (atomic_read(&efx->rxq_flush_outstanding) >=
                                    EFX_RX_FLUSH_COUNT)
                                        break;

                                if (rx_queue->flush_pending) {
                                        rx_queue->flush_pending = false;
                                        atomic_dec(&efx->rxq_flush_pending);
                                        atomic_inc(&efx->rxq_flush_outstanding);
                                        efx_farch_flush_rx_queue(rx_queue);
                                }
                        }
                }

        wait:
                timeout = wait_event_timeout(efx->flush_wq,
                                             efx_farch_flush_wake(efx),
                                             timeout);
        }

        if (atomic_read(&efx->active_queues) &&
            !efx_check_tx_flush_complete(efx)) {
                netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
                          "(rx %d+%d)\n", atomic_read(&efx->active_queues),
                          atomic_read(&efx->rxq_flush_outstanding),
                          atomic_read(&efx->rxq_flush_pending));
                rc = -ETIMEDOUT;

                atomic_set(&efx->active_queues, 0);
                atomic_set(&efx->rxq_flush_pending, 0);
                atomic_set(&efx->rxq_flush_outstanding, 0);
        }

        return rc;
}

int efx_farch_fini_dmaq(struct efx_nic *efx)
{
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        struct efx_rx_queue *rx_queue;
        int rc = 0;

        /* Do not attempt to write to the NIC during EEH recovery */
        if (efx->state != STATE_RECOVERY) {
                /* Only perform flush if DMA is enabled */
                if (efx->pci_dev->is_busmaster) {
                        efx->type->prepare_flush(efx);
                        rc = efx_farch_do_flush(efx);
                        efx->type->finish_flush(efx);
                }

                efx_for_each_channel(channel, efx) {
                        efx_for_each_channel_rx_queue(rx_queue, channel)
                                efx_farch_rx_fini(rx_queue);
                        efx_for_each_channel_tx_queue(tx_queue, channel)
                                efx_farch_tx_fini(tx_queue);
                }
        }

        return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (e.g. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events.  This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through efx_check_tx_flush_complete()).
 * If we don't fix this up, on the next call to efx_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously.  Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
void efx_farch_finish_flr(struct efx_nic *efx)
{
        atomic_set(&efx->rxq_flush_pending, 0);
        atomic_set(&efx->rxq_flush_outstanding, 0);
        atomic_set(&efx->active_queues, 0);
}


/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
        efx_dword_t reg;
        struct efx_nic *efx = channel->efx;

        EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
                             channel->eventq_read_ptr & channel->eventq_mask);

        /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
         * of 4 bytes, but it is really 16 bytes just like later revisions.
         */
        efx_writed(efx, &reg,
                   efx->type->evq_rptr_tbl_base +
                   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
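
/* Illustrative example (not part of the original driver), assuming the
 * 16-byte step described in the comment above: the RPTR register for
 * channel 2 would live at evq_rptr_tbl_base + 32.
 */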

/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
                              efx_qword_t *event)
{
        efx_oword_t drv_ev_reg;

        BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
                     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
        drv_ev_reg.u32[0] = event->u32[0];
        drv_ev_reg.u32[1] = event->u32[1];
        drv_ev_reg.u32[2] = 0;
        drv_ev_reg.u32[3] = 0;
        EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
        efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
        efx_qword_t event;

        EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
                             FSE_AZ_EV_CODE_DRV_GEN_EV,
                             FSF_AZ_DRV_GEN_EV_MAGIC, magic);
        efx_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
        unsigned int tx_ev_desc_ptr;
        unsigned int tx_ev_q_label;
        struct efx_tx_queue *tx_queue;
        struct efx_nic *efx = channel->efx;

        if (unlikely(READ_ONCE(efx->reset_pending)))
                return;

        if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
                /* Transmit completion */
                tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
                tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
                tx_queue = efx_channel_get_tx_queue(
                        channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
                efx_xmit_done(tx_queue, tx_ev_desc_ptr);
        } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
                /* Rewrite the FIFO write pointer */
                tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
                tx_queue = efx_channel_get_tx_queue(
                        channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);

                netif_tx_lock(efx->net_dev);
                efx_farch_notify_tx_desc(tx_queue);
                netif_tx_unlock(efx->net_dev);
        } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
                efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
        } else {
                netif_err(efx, tx_err, efx->net_dev,
                          "channel %d unexpected TX event "
                          EFX_QWORD_FMT"\n", channel->channel,
                          EFX_QWORD_VAL(*event));
        }
}
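
/* Illustrative example (not part of the original driver): if the
 * previous completion left the queue's read position at descriptor 100
 * and this event carries FSF_AZ_TX_EV_DESC_PTR == 110, a single
 * efx_xmit_done() call completes descriptors 101-110.
 */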

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
                                      const efx_qword_t *event)
{
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_nic *efx = rx_queue->efx;
        bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
        bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
        bool rx_ev_frm_trunc, rx_ev_tobe_disc;
        bool rx_ev_other_err, rx_ev_pause_frm;

        rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
        rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
                                                 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
        rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
                                                  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
        rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
                                                   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
        rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
        rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
        rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

        /* Every error apart from tobe_disc and pause_frm */
        rx_ev_other_err = (rx_ev_tcp_udp_chksum_err |
                           rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
                           rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

        /* Count errors that are not in MAC stats.  Ignore expected
         * checksum errors during self-test. */
        if (rx_ev_frm_trunc)
                ++channel->n_rx_frm_trunc;
        else if (rx_ev_tobe_disc)
                ++channel->n_rx_tobe_disc;
        else if (!efx->loopback_selftest) {
                if (rx_ev_ip_hdr_chksum_err)
                        ++channel->n_rx_ip_hdr_chksum_err;
                else if (rx_ev_tcp_udp_chksum_err)
                        ++channel->n_rx_tcp_udp_chksum_err;
        }

        /* TOBE_DISC is expected on unicast mismatches; don't print out an
         * error message.  FRM_TRUNC indicates RXDP dropped the packet due
         * to a FIFO overflow.
         */
#ifdef DEBUG
        if (rx_ev_other_err && net_ratelimit()) {
                netif_dbg(efx, rx_err, efx->net_dev,
                          " RX queue %d unexpected RX event "
                          EFX_QWORD_FMT "%s%s%s%s%s%s%s\n",
                          efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
                          rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
                          rx_ev_ip_hdr_chksum_err ?
                          " [IP_HDR_CHKSUM_ERR]" : "",
                          rx_ev_tcp_udp_chksum_err ?
                          " [TCP_UDP_CHKSUM_ERR]" : "",
                          rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
                          rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
                          rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
                          rx_ev_pause_frm ? " [PAUSE]" : "");
        }
#else
        (void) rx_ev_other_err;
#endif

        if (efx->net_dev->features & NETIF_F_RXALL)
                /* don't discard frame for CRC error */
                rx_ev_eth_crc_err = false;

        /* The frame must be discarded if any of these are true. */
        return (rx_ev_eth_crc_err | rx_ev_frm_trunc |
                rx_ev_tobe_disc | rx_ev_pause_frm) ?
                EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_nic *efx = rx_queue->efx;
        unsigned expected, dropped;

        if (rx_queue->scatter_n &&
            index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
                      rx_queue->ptr_mask)) {
                ++channel->n_rx_nodesc_trunc;
                return true;
        }

        expected = rx_queue->removed_count & rx_queue->ptr_mask;
        dropped = (index - expected) & rx_queue->ptr_mask;
        netif_info(efx, rx_err, efx->net_dev,
                   "dropped %d events (index=%d expected=%d)\n",
                   dropped, index, expected);

        efx_schedule_reset(efx, RESET_TYPE_DISABLE);
        return false;
}
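
/* Illustrative example (not part of the original driver): with
 * ptr_mask == 511, expected == 508 and index == 2, the modular
 * arithmetic gives dropped = (2 - 508) & 511 == 6 events missed across
 * the ring wrap-around.
 */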

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
        unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
        unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
        unsigned expected_ptr;
        bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
        u16 flags;
        struct efx_rx_queue *rx_queue;
        struct efx_nic *efx = channel->efx;

        if (unlikely(READ_ONCE(efx->reset_pending)))
                return;

        rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
        rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
        WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
                channel->channel);

        rx_queue = efx_channel_get_rx_queue(channel);

        rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
        expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
                        rx_queue->ptr_mask);

        /* Check for partial drops and other errors */
        if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
            unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
                if (rx_ev_desc_ptr != expected_ptr &&
                    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
                        return;

                /* Discard all pending fragments */
                if (rx_queue->scatter_n) {
                        efx_rx_packet(
                                rx_queue,
                                rx_queue->removed_count & rx_queue->ptr_mask,
                                rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
                        rx_queue->removed_count += rx_queue->scatter_n;
                        rx_queue->scatter_n = 0;
                }

                /* Return if there is no new fragment */
                if (rx_ev_desc_ptr != expected_ptr)
                        return;

                /* Discard new fragment if not SOP */
                if (!rx_ev_sop) {
                        efx_rx_packet(
                                rx_queue,
                                rx_queue->removed_count & rx_queue->ptr_mask,
                                1, 0, EFX_RX_PKT_DISCARD);
                        ++rx_queue->removed_count;
                        return;
                }
        }

        ++rx_queue->scatter_n;
        if (rx_ev_cont)
                return;

        rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
        rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
        rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

        if (likely(rx_ev_pkt_ok)) {
                /* If packet is marked as OK then we can rely on the
                 * hardware checksum and classification.
                 */
                flags = 0;
                switch (rx_ev_hdr_type) {
                case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
                        flags |= EFX_RX_PKT_TCP;
                        fallthrough;
                case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
                        flags |= EFX_RX_PKT_CSUMMED;
                        fallthrough;
                case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
                case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
                        break;
                }
        } else {
                flags = efx_farch_handle_rx_not_ok(rx_queue, event);
        }

        /* Detect multicast packets that didn't match the filter */
        rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
        if (rx_ev_mcast_pkt) {
                unsigned int rx_ev_mcast_hash_match =
                        EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

                if (unlikely(!rx_ev_mcast_hash_match)) {
                        ++channel->n_rx_mcast_mismatch;
                        flags |= EFX_RX_PKT_DISCARD;
                }
        }

        channel->irq_mod_score += 2;

        /* Handle received packet */
        efx_rx_packet(rx_queue,
                      rx_queue->removed_count & rx_queue->ptr_mask,
                      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
        rx_queue->removed_count += rx_queue->scatter_n;
        rx_queue->scatter_n = 0;
}
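
/* Illustrative example (not part of the original driver): a packet
 * scattered over three RX buffers raises three events: an SOP event, a
 * JUMBO_CONT continuation, and a final event with JUMBO_CONT clear.
 * The first two only bump scatter_n; the final one delivers the burst
 * to efx_rx_packet() with n_frags == 3 and resets scatter_n to zero.
 */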

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
        struct efx_tx_queue *tx_queue;
        int qid;

        qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
        if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
                tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
                                            qid % EFX_MAX_TXQ_PER_CHANNEL);
                if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
                        efx_farch_magic_event(tx_queue->channel,
                                              EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
                }
        }
}
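
/* Illustrative example (not part of the original driver): the flushed
 * queue number decomposes as channel = qid / EFX_MAX_TXQ_PER_CHANNEL
 * and per-channel queue = qid % EFX_MAX_TXQ_PER_CHANNEL; with a
 * hypothetical four queues per channel, qid 9 would name queue 1 on
 * channel 2.
 */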
1096
1097/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1098 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1099 * the RX queue back to the mask of RX queues in need of flushing.
1100 */
1101static void
1102efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1103{
1104        struct efx_channel *channel;
1105        struct efx_rx_queue *rx_queue;
1106        int qid;
1107        bool failed;
1108
1109        qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1110        failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1111        if (qid >= efx->n_channels)
1112                return;
1113        channel = efx_get_channel(efx, qid);
1114        if (!efx_channel_has_rx_queue(channel))
1115                return;
1116        rx_queue = efx_channel_get_rx_queue(channel);
1117
1118        if (failed) {
1119                netif_info(efx, hw, efx->net_dev,
1120                           "RXQ %d flush retry\n", qid);
1121                rx_queue->flush_pending = true;
1122                atomic_inc(&efx->rxq_flush_pending);
1123        } else {
1124                efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
1125                                      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1126        }
1127        atomic_dec(&efx->rxq_flush_outstanding);
1128        if (efx_farch_flush_wake(efx))
1129                wake_up(&efx->flush_wq);
1130}
1131
1132static void
1133efx_farch_handle_drain_event(struct efx_channel *channel)
1134{
1135        struct efx_nic *efx = channel->efx;
1136
1137        WARN_ON(atomic_read(&efx->active_queues) == 0);
1138        atomic_dec(&efx->active_queues);
1139        if (efx_farch_flush_wake(efx))
1140                wake_up(&efx->flush_wq);
1141}
1142
1143static void efx_farch_handle_generated_event(struct efx_channel *channel,
1144                                             efx_qword_t *event)
1145{
1146        struct efx_nic *efx = channel->efx;
1147        struct efx_rx_queue *rx_queue =
1148                efx_channel_has_rx_queue(channel) ?
1149                efx_channel_get_rx_queue(channel) : NULL;
1150        unsigned magic, code;
1151
1152        magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1153        code = _EFX_CHANNEL_MAGIC_CODE(magic);
1154
1155        if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1156                channel->event_test_cpu = raw_smp_processor_id();
1157        } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1158                /* The queue must be empty, so we won't receive any rx
1159                 * events, so efx_process_channel() won't refill the
1160                 * queue. Refill it here */
1161                efx_fast_push_rx_descriptors(rx_queue, true);
1162        } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1163                efx_farch_handle_drain_event(channel);
1164        } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1165                efx_farch_handle_drain_event(channel);
1166        } else {
1167                netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1168                          "generated event "EFX_QWORD_FMT"\n",
1169                          channel->channel, EFX_QWORD_VAL(*event));
1170        }
1171}
1172
1173static void
1174efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1175{
1176        struct efx_nic *efx = channel->efx;
1177        unsigned int ev_sub_code;
1178        unsigned int ev_sub_data;
1179
1180        ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1181        ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1182
1183        switch (ev_sub_code) {
1184        case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1185                netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1186                           channel->channel, ev_sub_data);
1187                efx_farch_handle_tx_flush_done(efx, event);
1188#ifdef CONFIG_SFC_SRIOV
1189                efx_siena_sriov_tx_flush_done(efx, event);
1190#endif
1191                break;
1192        case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1193                netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1194                           channel->channel, ev_sub_data);
1195                efx_farch_handle_rx_flush_done(efx, event);
1196#ifdef CONFIG_SFC_SRIOV
1197                efx_siena_sriov_rx_flush_done(efx, event);
1198#endif
1199                break;
1200        case FSE_AZ_EVQ_INIT_DONE_EV:
1201                netif_dbg(efx, hw, efx->net_dev,
1202                          "channel %d EVQ %d initialised\n",
1203                          channel->channel, ev_sub_data);
1204                break;
1205        case FSE_AZ_SRM_UPD_DONE_EV:
1206                netif_vdbg(efx, hw, efx->net_dev,
1207                           "channel %d SRAM update done\n", channel->channel);
1208                break;
1209        case FSE_AZ_WAKE_UP_EV:
1210                netif_vdbg(efx, hw, efx->net_dev,
1211                           "channel %d RXQ %d wakeup event\n",
1212                           channel->channel, ev_sub_data);
1213                break;
1214        case FSE_AZ_TIMER_EV:
1215                netif_vdbg(efx, hw, efx->net_dev,
1216                           "channel %d RX queue %d timer expired\n",
1217                           channel->channel, ev_sub_data);
1218                break;
1219        case FSE_AA_RX_RECOVER_EV:
1220                netif_err(efx, rx_err, efx->net_dev,
1221                          "channel %d seen DRIVER RX_RESET event. "
1222                        "Resetting.\n", channel->channel);
1223                atomic_inc(&efx->rx_reset);
1224                efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1225                break;
1226        case FSE_BZ_RX_DSC_ERROR_EV:
1227                if (ev_sub_data < EFX_VI_BASE) {
1228                        netif_err(efx, rx_err, efx->net_dev,
1229                                  "RX DMA Q %d reports descriptor fetch error."
1230                                  " RX Q %d is disabled.\n", ev_sub_data,
1231                                  ev_sub_data);
1232                        efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1233                }
1234#ifdef CONFIG_SFC_SRIOV
1235                else
1236                        efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
1237#endif
1238                break;
1239        case FSE_BZ_TX_DSC_ERROR_EV:
1240                if (ev_sub_data < EFX_VI_BASE) {
1241                        netif_err(efx, tx_err, efx->net_dev,
1242                                  "TX DMA Q %d reports descriptor fetch error."
1243                                  " TX Q %d is disabled.\n", ev_sub_data,
1244                                  ev_sub_data);
1245                        efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1246                }
1247#ifdef CONFIG_SFC_SRIOV
1248                else
1249                        efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
1250#endif
1251                break;
1252        default:
1253                netif_vdbg(efx, hw, efx->net_dev,
1254                           "channel %d unknown driver event code %d "
1255                           "data %04x\n", channel->channel, ev_sub_code,
1256                           ev_sub_data);
1257                break;
1258        }
1259}
1260
1261int efx_farch_ev_process(struct efx_channel *channel, int budget)
1262{
1263        struct efx_nic *efx = channel->efx;
1264        unsigned int read_ptr;
1265        efx_qword_t event, *p_event;
1266        int ev_code;
1267        int spent = 0;
1268
1269        if (budget <= 0)
1270                return spent;
1271
1272        read_ptr = channel->eventq_read_ptr;
1273
1274        for (;;) {
1275                p_event = efx_event(channel, read_ptr);
1276                event = *p_event;
1277
1278                if (!efx_event_present(&event))
1279                        /* End of events */
1280                        break;
1281
1282                netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1283                           "channel %d event is "EFX_QWORD_FMT"\n",
1284                           channel->channel, EFX_QWORD_VAL(event));
1285
1286                /* Clear this event by marking it all ones */
1287                EFX_SET_QWORD(*p_event);
1288
1289                ++read_ptr;
1290
1291                ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1292
1293                switch (ev_code) {
1294                case FSE_AZ_EV_CODE_RX_EV:
1295                        efx_farch_handle_rx_event(channel, &event);
1296                        if (++spent == budget)
1297                                goto out;
1298                        break;
1299                case FSE_AZ_EV_CODE_TX_EV:
1300                        efx_farch_handle_tx_event(channel, &event);
1301                        break;
1302                case FSE_AZ_EV_CODE_DRV_GEN_EV:
1303                        efx_farch_handle_generated_event(channel, &event);
1304                        break;
1305                case FSE_AZ_EV_CODE_DRIVER_EV:
1306                        efx_farch_handle_driver_event(channel, &event);
1307                        break;
1308#ifdef CONFIG_SFC_SRIOV
1309                case FSE_CZ_EV_CODE_USER_EV:
1310                        efx_siena_sriov_event(channel, &event);
1311                        break;
1312#endif
1313                case FSE_CZ_EV_CODE_MCDI_EV:
1314                        efx_mcdi_process_event(channel, &event);
1315                        break;
1316                case FSE_AZ_EV_CODE_GLOBAL_EV:
1317                        if (efx->type->handle_global_event &&
1318                            efx->type->handle_global_event(channel, &event))
1319                                break;
1320                        fallthrough;
1321                default:
1322                        netif_err(channel->efx, hw, channel->efx->net_dev,
1323                                  "channel %d unknown event type %d (data "
1324                                  EFX_QWORD_FMT ")\n", channel->channel,
1325                                  ev_code, EFX_QWORD_VAL(event));
1326                }
1327        }
1328
1329out:
1330        channel->eventq_read_ptr = read_ptr;
1331        return spent;
1332}
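
/* Editor's note: a minimal sketch (not the driver's actual poll routine)
 * of how a NAPI handler is expected to drive the budget contract above:
 * only a return value strictly below the budget lets the channel complete
 * NAPI and re-arm the event queue.  The helper name example_poll is an
 * illustrative assumption.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
        struct efx_channel *channel =
                container_of(napi, struct efx_channel, napi_str);
        int spent = efx_farch_ev_process(channel, budget);

        if (spent < budget && napi_complete_done(napi, spent))
                efx_farch_ev_read_ack(channel); /* re-arm the event queue */
        return spent;
}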
1333
1334/* Allocate buffer table entries for event queue */
1335int efx_farch_ev_probe(struct efx_channel *channel)
1336{
1337        struct efx_nic *efx = channel->efx;
1338        unsigned entries;
1339
1340        entries = channel->eventq_mask + 1;
1341        return efx_alloc_special_buffer(efx, &channel->eventq,
1342                                        entries * sizeof(efx_qword_t));
1343}
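
/* Editor's note: worked sizing example for the allocation above, assuming
 * the driver's usual 4096-byte buffer-table granularity (EFX_BUF_SIZE):
 * eventq_mask = 4095 gives 4096 entries of 8 bytes each, i.e. a 32 KiB
 * special buffer backed by 8 buffer table entries.
 */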
1344
1345int efx_farch_ev_init(struct efx_channel *channel)
1346{
1347        efx_oword_t reg;
1348        struct efx_nic *efx = channel->efx;
1349
1350        netif_dbg(efx, hw, efx->net_dev,
1351                  "channel %d event queue in special buffers %d-%d\n",
1352                  channel->channel, channel->eventq.index,
1353                  channel->eventq.index + channel->eventq.entries - 1);
1354
1355        EFX_POPULATE_OWORD_3(reg,
1356                             FRF_CZ_TIMER_Q_EN, 1,
1357                             FRF_CZ_HOST_NOTIFY_MODE, 0,
1358                             FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1359        efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1360
1361        /* Pin event queue buffer */
1362        efx_init_special_buffer(efx, &channel->eventq);
1363
1364        /* Fill event queue with all ones (i.e. empty events) */
1365        memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1366
1367        /* Push event queue to card */
1368        EFX_POPULATE_OWORD_3(reg,
1369                             FRF_AZ_EVQ_EN, 1,
1370                             FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1371                             FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1372        efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1373                         channel->channel);
1374
1375        return 0;
1376}
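
/* Editor's note: FRF_AZ_EVQ_SIZE above is written as __ffs() of the entry
 * count; for a power of two this is exactly log2, e.g. __ffs(512) = 9 and
 * __ffs(4096) = 12, which appears to be the encoding the field expects.
 */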
1377
1378void efx_farch_ev_fini(struct efx_channel *channel)
1379{
1380        efx_oword_t reg;
1381        struct efx_nic *efx = channel->efx;
1382
1383        /* Remove event queue from card */
1384        EFX_ZERO_OWORD(reg);
1385        efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1386                         channel->channel);
1387        efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1388
1389        /* Unpin event queue */
1390        efx_fini_special_buffer(efx, &channel->eventq);
1391}
1392
1393/* Free buffers backing event queue */
1394void efx_farch_ev_remove(struct efx_channel *channel)
1395{
1396        efx_free_special_buffer(channel->efx, &channel->eventq);
1397}
1398
1399
1400void efx_farch_ev_test_generate(struct efx_channel *channel)
1401{
1402        efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1403}
1404
1405void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
1406{
1407        efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
1408                              EFX_CHANNEL_MAGIC_FILL(rx_queue));
1409}
1410
1411/**************************************************************************
1412 *
1413 * Hardware interrupts
1414 * The hardware interrupt handler does very little work; all the event
1415 * queue processing is carried out in per-channel NAPI poll context.
1416 *
1417 **************************************************************************/
1418
1419/* Enable/disable/generate interrupts */
1420static inline void efx_farch_interrupts(struct efx_nic *efx,
1421                                      bool enabled, bool force)
1422{
1423        efx_oword_t int_en_reg_ker;
1424
1425        EFX_POPULATE_OWORD_3(int_en_reg_ker,
1426                             FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1427                             FRF_AZ_KER_INT_KER, force,
1428                             FRF_AZ_DRV_INT_EN_KER, enabled);
1429        efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1430}
1431
1432void efx_farch_irq_enable_master(struct efx_nic *efx)
1433{
1434        EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1435        wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1436
1437        efx_farch_interrupts(efx, true, false);
1438}
1439
1440void efx_farch_irq_disable_master(struct efx_nic *efx)
1441{
1442        /* Disable interrupts */
1443        efx_farch_interrupts(efx, false, false);
1444}
1445
1446/* Generate a test interrupt
1447 * Interrupts must already have been enabled, otherwise nasty things
1448 * may happen.
1449 */
1450int efx_farch_irq_test_generate(struct efx_nic *efx)
1451{
1452        efx_farch_interrupts(efx, true, true);
1453        return 0;
1454}
1455
1456/* Process a fatal interrupt
1457 * Disable bus mastering ASAP and schedule a reset
1458 */
1459irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
1460{
1461        efx_oword_t *int_ker = efx->irq_status.addr;
1462        efx_oword_t fatal_intr;
1463        int error, mem_perr;
1464
1465        efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1466        error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1467
1468        netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1469                  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1470                  EFX_OWORD_VAL(fatal_intr),
1471                  error ? "disabling bus mastering" : "no recognised error");
1472
1473        /* If this is a memory parity error, dump which blocks are offending */
1474        mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1475                    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1476        if (mem_perr) {
1477                efx_oword_t reg;
1478                efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1479                netif_err(efx, hw, efx->net_dev,
1480                          "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1481                          EFX_OWORD_VAL(reg));
1482        }
1483
1484        /* Disable bus mastering and interrupts */
1485        pci_clear_master(efx->pci_dev);
1486        efx_farch_irq_disable_master(efx);
1487
1488        /* Count errors and reset or disable the NIC accordingly */
1489        if (efx->int_error_count == 0 ||
1490            time_after(jiffies, efx->int_error_expire)) {
1491                efx->int_error_count = 0;
1492                efx->int_error_expire =
1493                        jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1494        }
1495        if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1496                netif_err(efx, hw, efx->net_dev,
1497                          "SYSTEM ERROR - reset scheduled\n");
1498                efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1499        } else {
1500                netif_err(efx, hw, efx->net_dev,
1501                          "SYSTEM ERROR - max number of errors seen. "
1502                          "NIC will be disabled\n");
1503                efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1504        }
1505
1506        return IRQ_HANDLED;
1507}
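
/* Editor's note: worked example of the error window above: with
 * EFX_INT_ERROR_EXPIRE = 3600 and EFX_MAX_INT_ERRORS = 5, the first four
 * fatal interrupts within an hour of the first each schedule a recovery
 * reset; a fifth inside the same window disables the NIC instead.
 */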
1508
1509/* Handle a legacy interrupt
1510 * Acknowledges the interrupt and schedules event queue processing.
1511 */
1512irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
1513{
1514        struct efx_nic *efx = dev_id;
1515        bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
1516        efx_oword_t *int_ker = efx->irq_status.addr;
1517        irqreturn_t result = IRQ_NONE;
1518        struct efx_channel *channel;
1519        efx_dword_t reg;
1520        u32 queues;
1521        int syserr;
1522
1523        /* Read the ISR which also ACKs the interrupts */
1524        efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1525        queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1526
1527        /* Legacy interrupts are disabled too late by the EEH kernel
1528         * code. Disable them earlier.
1529         * If an EEH error occurred, the read will have returned all ones.
1530         */
1531        if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
1532            !efx->eeh_disabled_legacy_irq) {
1533                disable_irq_nosync(efx->legacy_irq);
1534                efx->eeh_disabled_legacy_irq = true;
1535        }
1536
1537        /* Handle non-event-queue sources */
1538        if (queues & (1U << efx->irq_level) && soft_enabled) {
1539                syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1540                if (unlikely(syserr))
1541                        return efx_farch_fatal_interrupt(efx);
1542                efx->last_irq_cpu = raw_smp_processor_id();
1543        }
1544
1545        if (queues != 0) {
1546                efx->irq_zero_count = 0;
1547
1548                /* Schedule processing of any interrupting queues */
1549                if (likely(soft_enabled)) {
1550                        efx_for_each_channel(channel, efx) {
1551                                if (queues & 1)
1552                                        efx_schedule_channel_irq(channel);
1553                                queues >>= 1;
1554                        }
1555                }
1556                result = IRQ_HANDLED;
1557
1558        } else {
1559                efx_qword_t *event;
1560
1561                /* Legacy ISR read can return zero once (SF bug 15783) */
1562
1563                /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1564                 * because this might be a shared interrupt. */
1565                if (efx->irq_zero_count++ == 0)
1566                        result = IRQ_HANDLED;
1567
1568                /* Ensure we schedule or rearm all event queues */
1569                if (likely(soft_enabled)) {
1570                        efx_for_each_channel(channel, efx) {
1571                                event = efx_event(channel,
1572                                                  channel->eventq_read_ptr);
1573                                if (efx_event_present(event))
1574                                        efx_schedule_channel_irq(channel);
1575                                else
1576                                        efx_farch_ev_read_ack(channel);
1577                        }
1578                }
1579        }
1580
1581        if (result == IRQ_HANDLED)
1582                netif_vdbg(efx, intr, efx->net_dev,
1583                           "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1584                           irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1585
1586        return result;
1587}
1588
1589/* Handle an MSI interrupt
1590 *
1591 * Handle an MSI hardware interrupt.  This routine schedules event
1592 * queue processing.  No interrupt acknowledgement cycle is necessary.
1593 * Also, we never need to check that the interrupt is for us, since
1594 * MSI interrupts cannot be shared.
1595 */
1596irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
1597{
1598        struct efx_msi_context *context = dev_id;
1599        struct efx_nic *efx = context->efx;
1600        efx_oword_t *int_ker = efx->irq_status.addr;
1601        int syserr;
1602
1603        netif_vdbg(efx, intr, efx->net_dev,
1604                   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1605                   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1606
1607        if (!likely(READ_ONCE(efx->irq_soft_enabled)))
1608                return IRQ_HANDLED;
1609
1610        /* Handle non-event-queue sources */
1611        if (context->index == efx->irq_level) {
1612                syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1613                if (unlikely(syserr))
1614                        return efx_farch_fatal_interrupt(efx);
1615                efx->last_irq_cpu = raw_smp_processor_id();
1616        }
1617
1618        /* Schedule processing of the channel */
1619        efx_schedule_channel_irq(efx->channel[context->index]);
1620
1621        return IRQ_HANDLED;
1622}
1623
1624/* Set up the RSS indirection table.
1625 * This maps from the hash value of the packet to an RX queue.
1626 */
1627void efx_farch_rx_push_indir_table(struct efx_nic *efx)
1628{
1629        size_t i = 0;
1630        efx_dword_t dword;
1631
1632        BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
1633                     FR_BZ_RX_INDIRECTION_TBL_ROWS);
1634
1635        for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1636                EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1637                                     efx->rss_context.rx_indir_table[i]);
1638                efx_writed(efx, &dword,
1639                           FR_BZ_RX_INDIRECTION_TBL +
1640                           FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1641        }
1642}
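
/* Editor's note: a minimal sketch of filling the indirection table before
 * pushing it, assuming the usual round-robin default spread; the helper
 * name example_fill_indir_table is illustrative, not a driver function.
 */
static void example_fill_indir_table(struct efx_nic *efx)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++)
                /* index % n_rx_channels, via the generic ethtool helper */
                efx->rss_context.rx_indir_table[i] =
                        ethtool_rxfh_indir_default(i, efx->n_rx_channels);
        efx_farch_rx_push_indir_table(efx);
}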
1643
1644void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
1645{
1646        size_t i = 0;
1647        efx_dword_t dword;
1648
1649        BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
1650                     FR_BZ_RX_INDIRECTION_TBL_ROWS);
1651
1652        for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1653                efx_readd(efx, &dword,
1654                           FR_BZ_RX_INDIRECTION_TBL +
1655                           FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1656                efx->rss_context.rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE);
1657        }
1658}
1659
1660/* Looks at available SRAM resources and works out how many queues we
1661 * can support, and where things like descriptor caches should live.
1662 *
1663 * SRAM is split up as follows:
1664 * 0                          buftbl entries for channels
1665 * efx->vf_buftbl_base        buftbl entries for SR-IOV
1666 * efx->rx_dc_base            RX descriptor caches
1667 * efx->tx_dc_base            TX descriptor caches
1668 */
1669void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1670{
1671        unsigned vi_count, buftbl_min, total_tx_channels;
1672
1673#ifdef CONFIG_SFC_SRIOV
1674        struct siena_nic_data *nic_data = efx->nic_data;
1675#endif
1676
1677        total_tx_channels = efx->n_tx_channels + efx->n_extra_tx_channels;
1678        /* Account for the buffer table entries backing the datapath channels
1679         * and the descriptor caches for those channels.
1680         */
1681        buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1682                       total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE +
1683                       efx->n_channels * EFX_MAX_EVQ_SIZE)
1684                      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1685        vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
1686
1687#ifdef CONFIG_SFC_SRIOV
1688        if (efx->type->sriov_wanted) {
1689                if (efx->type->sriov_wanted(efx)) {
1690                        unsigned vi_dc_entries, buftbl_free;
1691                        unsigned entries_per_vf, vf_limit;
1692
1693                        nic_data->vf_buftbl_base = buftbl_min;
1694
1695                        vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1696                        vi_count = max(vi_count, EFX_VI_BASE);
1697                        buftbl_free = (sram_lim_qw - buftbl_min -
1698                                       vi_count * vi_dc_entries);
1699
1700                        entries_per_vf = ((vi_dc_entries +
1701                                           EFX_VF_BUFTBL_PER_VI) *
1702                                          efx_vf_size(efx));
1703                        vf_limit = min(buftbl_free / entries_per_vf,
1704                                       (1024U - EFX_VI_BASE) >> efx->vi_scale);
1705
1706                        if (efx->vf_count > vf_limit) {
1707                                netif_err(efx, probe, efx->net_dev,
1708                                          "Reducing VF count from %d to %d\n",
1709                                          efx->vf_count, vf_limit);
1710                                efx->vf_count = vf_limit;
1711                        }
1712                        vi_count += efx->vf_count * efx_vf_size(efx);
1713                }
1714        }
1715#endif
1716
1717        efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1718        efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1719}
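
/* Editor's note: worked example of the carve-up above with illustrative
 * numbers: sram_lim_qw = 65536 and vi_count = 32 give
 * tx_dc_base = 65536 - 32 * TX_DC_ENTRIES = 65024 and
 * rx_dc_base = 65024 - 32 * RX_DC_ENTRIES = 62976, leaving qwords
 * 0..62975 for buffer table entries.
 */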
1720
1721u32 efx_farch_fpga_ver(struct efx_nic *efx)
1722{
1723        efx_oword_t altera_build;
1724        efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1725        return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1726}
1727
1728void efx_farch_init_common(struct efx_nic *efx)
1729{
1730        efx_oword_t temp;
1731
1732        /* Set positions of descriptor caches in SRAM. */
1733        EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1734        efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1735        EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1736        efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1737
1738        /* Set TX descriptor cache size. */
1739        BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1740        EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1741        efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1742
1743        /* Set RX descriptor cache size.  Set low watermark to size-8, as
1744         * this allows most efficient prefetching.
1745         */
1746        BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1747        EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1748        efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1749        EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1750        efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1751
1752        /* Program INT_KER address */
1753        EFX_POPULATE_OWORD_2(temp,
1754                             FRF_AZ_NORM_INT_VEC_DIS_KER,
1755                             EFX_INT_MODE_USE_MSI(efx),
1756                             FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1757        efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1758
1759        if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1760                /* Use an interrupt level unused by event queues */
1761                efx->irq_level = 0x1f;
1762        else
1763                /* Use a valid MSI-X vector */
1764                efx->irq_level = 0;
1765
1766        /* Enable all the genuinely fatal interrupts.  (They are still
1767         * masked by the overall interrupt mask, controlled by
1768         * efx_farch_interrupts()).
1769         *
1770         * Note: all other fatal interrupts are enabled.
1771         */
1772        EFX_POPULATE_OWORD_3(temp,
1773                             FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1774                             FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1775                             FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1776        EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1777        EFX_INVERT_OWORD(temp);
1778        efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1779
1780        /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1781         * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1782         */
1783        efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1784        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1785        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1786        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1787        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1788        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1789        /* Enable SW_EV to inherit in char driver - assume harmless here */
1790        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1791        /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1792        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1793        /* Disable hardware watchdog which can misfire */
1794        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1795        /* Squash TX of packets of 16 bytes or less */
1796        EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1797        efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1798
1799        EFX_POPULATE_OWORD_4(temp,
1800                             /* Default values */
1801                             FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1802                             FRF_BZ_TX_PACE_SB_AF, 0xb,
1803                             FRF_BZ_TX_PACE_FB_BASE, 0,
1804                             /* Allow large pace values in the fast bin. */
1805                             FRF_BZ_TX_PACE_BIN_TH,
1806                             FFE_BZ_TX_PACE_RESERVED);
1807        efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1808}
1809
1810/**************************************************************************
1811 *
1812 * Filter tables
1813 *
1814 **************************************************************************
1815 */
1816
1817/* "Fudge factors" - difference between programmed value and actual depth.
1818 * Due to pipelined implementation we need to program H/W with a value that
1819 * is larger than the hop limit we want.
1820 */
1821#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1822#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
1823
1824/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
1825 * We also need to avoid infinite loops in efx_farch_filter_search() when the
1826 * table is full.
1827 */
1828#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
1829
1830/* Don't try very hard to find space for performance hints, as this is
1831 * counter-productive. */
1832#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1833
1834enum efx_farch_filter_type {
1835        EFX_FARCH_FILTER_TCP_FULL = 0,
1836        EFX_FARCH_FILTER_TCP_WILD,
1837        EFX_FARCH_FILTER_UDP_FULL,
1838        EFX_FARCH_FILTER_UDP_WILD,
1839        EFX_FARCH_FILTER_MAC_FULL = 4,
1840        EFX_FARCH_FILTER_MAC_WILD,
1841        EFX_FARCH_FILTER_UC_DEF = 8,
1842        EFX_FARCH_FILTER_MC_DEF,
1843        EFX_FARCH_FILTER_TYPE_COUNT,            /* number of specific types */
1844};
1845
1846enum efx_farch_filter_table_id {
1847        EFX_FARCH_FILTER_TABLE_RX_IP = 0,
1848        EFX_FARCH_FILTER_TABLE_RX_MAC,
1849        EFX_FARCH_FILTER_TABLE_RX_DEF,
1850        EFX_FARCH_FILTER_TABLE_TX_MAC,
1851        EFX_FARCH_FILTER_TABLE_COUNT,
1852};
1853
1854enum efx_farch_filter_index {
1855        EFX_FARCH_FILTER_INDEX_UC_DEF,
1856        EFX_FARCH_FILTER_INDEX_MC_DEF,
1857        EFX_FARCH_FILTER_SIZE_RX_DEF,
1858};
1859
1860struct efx_farch_filter_spec {
1861        u8      type:4;
1862        u8      priority:4;
1863        u8      flags;
1864        u16     dmaq_id;
1865        u32     data[3];
1866};
1867
1868struct efx_farch_filter_table {
1869        enum efx_farch_filter_table_id id;
1870        u32             offset;         /* address of table relative to BAR */
1871        unsigned        size;           /* number of entries */
1872        unsigned        step;           /* step between entries */
1873        unsigned        used;           /* number currently used */
1874        unsigned long   *used_bitmap;
1875        struct efx_farch_filter_spec *spec;
1876        unsigned        search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
1877};
1878
1879struct efx_farch_filter_state {
1880        struct rw_semaphore lock; /* Protects table contents */
1881        struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
1882};
1883
1884static void
1885efx_farch_filter_table_clear_entry(struct efx_nic *efx,
1886                                   struct efx_farch_filter_table *table,
1887                                   unsigned int filter_idx);
1888
1889/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1890 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
1891static u16 efx_farch_filter_hash(u32 key)
1892{
1893        u16 tmp;
1894
1895        /* First 16 rounds */
1896        tmp = 0x1fff ^ key >> 16;
1897        tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1898        tmp = tmp ^ tmp >> 9;
1899        /* Last 16 rounds */
1900        tmp = tmp ^ tmp << 13 ^ key;
1901        tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1902        return tmp ^ tmp >> 9;
1903}
1904
1905/* To allow for hash collisions, filter search continues at these
1906 * increments from the first possible entry selected by the hash. */
1907static u16 efx_farch_filter_increment(u32 key)
1908{
1909        return key * 2 - 1;
1910}
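
/* Editor's note: a minimal sketch of how the hash and increment above
 * combine into the open-addressing probe sequence used by the insert and
 * remove paths below; example_probe_slot is an illustrative name only.
 */
static unsigned int example_probe_slot(u32 key, unsigned int depth,
                                       unsigned int table_size)
{
        /* table_size is a power of two, so masking keeps us in range */
        unsigned int mask = table_size - 1;

        /* Depth 1 probes the hashed slot; each later step adds the odd
         * increment, which is co-prime with the table size and therefore
         * visits every slot before repeating.
         */
        return (efx_farch_filter_hash(key) +
                (depth - 1) * efx_farch_filter_increment(key)) & mask;
}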
1911
1912static enum efx_farch_filter_table_id
1913efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
1914{
1915        BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1916                     (EFX_FARCH_FILTER_TCP_FULL >> 2));
1917        BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1918                     (EFX_FARCH_FILTER_TCP_WILD >> 2));
1919        BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1920                     (EFX_FARCH_FILTER_UDP_FULL >> 2));
1921        BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1922                     (EFX_FARCH_FILTER_UDP_WILD >> 2));
1923        BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1924                     (EFX_FARCH_FILTER_MAC_FULL >> 2));
1925        BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1926                     (EFX_FARCH_FILTER_MAC_WILD >> 2));
1927        BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
1928                     EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
1929        return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
1930}
1931
1932static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
1933{
1934        struct efx_farch_filter_state *state = efx->filter_state;
1935        struct efx_farch_filter_table *table;
1936        efx_oword_t filter_ctl;
1937
1938        efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1939
1940        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1941        EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1942                            table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
1943                            EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1944        EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1945                            table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
1946                            EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1947        EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1948                            table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
1949                            EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1950        EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1951                            table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
1952                            EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1953
1954        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
1955        if (table->size) {
1956                EFX_SET_OWORD_FIELD(
1957                        filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1958                        table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1959                        EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1960                EFX_SET_OWORD_FIELD(
1961                        filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1962                        table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1963                        EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1964        }
1965
1966        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
1967        if (table->size) {
1968                EFX_SET_OWORD_FIELD(
1969                        filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1970                        table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1971                EFX_SET_OWORD_FIELD(
1972                        filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1973                        !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1974                           EFX_FILTER_FLAG_RX_RSS));
1975                EFX_SET_OWORD_FIELD(
1976                        filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1977                        table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1978                EFX_SET_OWORD_FIELD(
1979                        filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1980                        !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1981                           EFX_FILTER_FLAG_RX_RSS));
1982
1983                /* There is a single bit to enable RX scatter for all
1984                 * unmatched packets.  Only set it if scatter is
1985                 * enabled in both filter specs.
1986                 */
1987                EFX_SET_OWORD_FIELD(
1988                        filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1989                        !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1990                           table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1991                           EFX_FILTER_FLAG_RX_SCATTER));
1992        } else {
1993                /* We don't expose 'default' filters because unmatched
1994                 * packets always go to the queue number found in the
1995                 * RSS table.  But we still need to set the RX scatter
1996                 * bit here.
1997                 */
1998                EFX_SET_OWORD_FIELD(
1999                        filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
2000                        efx->rx_scatter);
2001        }
2002
2003        efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
2004}
2005
2006static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
2007{
2008        struct efx_farch_filter_state *state = efx->filter_state;
2009        struct efx_farch_filter_table *table;
2010        efx_oword_t tx_cfg;
2011
2012        efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
2013
2014        table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2015        if (table->size) {
2016                EFX_SET_OWORD_FIELD(
2017                        tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
2018                        table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
2019                        EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
2020                EFX_SET_OWORD_FIELD(
2021                        tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
2022                        table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
2023                        EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
2024        }
2025
2026        efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
2027}
2028
2029static int
2030efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
2031                               const struct efx_filter_spec *gen_spec)
2032{
2033        bool is_full = false;
2034
2035        if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) && gen_spec->rss_context)
2036                return -EINVAL;
2037
2038        spec->priority = gen_spec->priority;
2039        spec->flags = gen_spec->flags;
2040        spec->dmaq_id = gen_spec->dmaq_id;
2041
2042        switch (gen_spec->match_flags) {
2043        case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2044              EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
2045              EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
2046                is_full = true;
2047                fallthrough;
2048        case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2049              EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
2050                __be32 rhost, host1, host2;
2051                __be16 rport, port1, port2;
2052
2053                EFX_WARN_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
2054
2055                if (gen_spec->ether_type != htons(ETH_P_IP))
2056                        return -EPROTONOSUPPORT;
2057                if (gen_spec->loc_port == 0 ||
2058                    (is_full && gen_spec->rem_port == 0))
2059                        return -EADDRNOTAVAIL;
2060                switch (gen_spec->ip_proto) {
2061                case IPPROTO_TCP:
2062                        spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
2063                                      EFX_FARCH_FILTER_TCP_WILD);
2064                        break;
2065                case IPPROTO_UDP:
2066                        spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
2067                                      EFX_FARCH_FILTER_UDP_WILD);
2068                        break;
2069                default:
2070                        return -EPROTONOSUPPORT;
2071                }
2072
2073                /* Filter is constructed in terms of source and destination,
2074                 * with the odd wrinkle that the ports are swapped in a UDP
2075                 * wildcard filter.  We need to convert from local and remote
2076                 * (= zero for wildcard) addresses.
2077                 */
2078                rhost = is_full ? gen_spec->rem_host[0] : 0;
2079                rport = is_full ? gen_spec->rem_port : 0;
2080                host1 = rhost;
2081                host2 = gen_spec->loc_host[0];
2082                if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
2083                        port1 = gen_spec->loc_port;
2084                        port2 = rport;
2085                } else {
2086                        port1 = rport;
2087                        port2 = gen_spec->loc_port;
2088                }
2089                spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2090                spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2091                spec->data[2] = ntohl(host2);
2092
2093                break;
2094        }
2095
2096        case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
2097                is_full = true;
2098                fallthrough;
2099        case EFX_FILTER_MATCH_LOC_MAC:
2100                spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
2101                              EFX_FARCH_FILTER_MAC_WILD);
2102                spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2103                spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2104                                 gen_spec->loc_mac[3] << 16 |
2105                                 gen_spec->loc_mac[4] << 8 |
2106                                 gen_spec->loc_mac[5]);
2107                spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2108                                 gen_spec->loc_mac[1]);
2109                break;
2110
2111        case EFX_FILTER_MATCH_LOC_MAC_IG:
2112                spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2113                              EFX_FARCH_FILTER_MC_DEF :
2114                              EFX_FARCH_FILTER_UC_DEF);
2115                memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2116                break;
2117
2118        default:
2119                return -EPROTONOSUPPORT;
2120        }
2121
2122        return 0;
2123}
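
/* Editor's note: worked example of the packing above for a TCP full-match
 * filter with remote 192.168.0.1:1000 and local 10.0.0.2:80 (illustrative
 * addresses): host1 = c0a80001, port1 = 03e8, host2 = 0a000002,
 * port2 = 0050, giving data[0] = 0x000103e8, data[1] = 0x0050c0a8 and
 * data[2] = 0x0a000002, i.e. the 96-bit key { host2, port2, host1, port1 }.
 */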
2124
2125static void
2126efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
2127                             const struct efx_farch_filter_spec *spec)
2128{
2129        bool is_full = false;
2130
2131        /* *gen_spec should be completely initialised, to be consistent
2132         * with efx_filter_init_{rx,tx}() and in case we want to copy
2133         * it back to userland.
2134         */
2135        memset(gen_spec, 0, sizeof(*gen_spec));
2136
2137        gen_spec->priority = spec->priority;
2138        gen_spec->flags = spec->flags;
2139        gen_spec->dmaq_id = spec->dmaq_id;
2140
2141        switch (spec->type) {
2142        case EFX_FARCH_FILTER_TCP_FULL:
2143        case EFX_FARCH_FILTER_UDP_FULL:
2144                is_full = true;
2145                fallthrough;
2146        case EFX_FARCH_FILTER_TCP_WILD:
2147        case EFX_FARCH_FILTER_UDP_WILD: {
2148                __be32 host1, host2;
2149                __be16 port1, port2;
2150
2151                gen_spec->match_flags =
2152                        EFX_FILTER_MATCH_ETHER_TYPE |
2153                        EFX_FILTER_MATCH_IP_PROTO |
2154                        EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
2155                if (is_full)
2156                        gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
2157                                                  EFX_FILTER_MATCH_REM_PORT);
2158                gen_spec->ether_type = htons(ETH_P_IP);
2159                gen_spec->ip_proto =
2160                        (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
2161                         spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
2162                        IPPROTO_TCP : IPPROTO_UDP;
2163
2164                host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2165                port1 = htons(spec->data[0]);
2166                host2 = htonl(spec->data[2]);
2167                port2 = htons(spec->data[1] >> 16);
2168                if (spec->flags & EFX_FILTER_FLAG_TX) {
2169                        gen_spec->loc_host[0] = host1;
2170                        gen_spec->rem_host[0] = host2;
2171                } else {
2172                        gen_spec->loc_host[0] = host2;
2173                        gen_spec->rem_host[0] = host1;
2174                }
2175                if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
2176                    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2177                        gen_spec->loc_port = port1;
2178                        gen_spec->rem_port = port2;
2179                } else {
2180                        gen_spec->loc_port = port2;
2181                        gen_spec->rem_port = port1;
2182                }
2183
2184                break;
2185        }
2186
2187        case EFX_FARCH_FILTER_MAC_FULL:
2188                is_full = true;
2189                fallthrough;
2190        case EFX_FARCH_FILTER_MAC_WILD:
2191                gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
2192                if (is_full)
2193                        gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
2194                gen_spec->loc_mac[0] = spec->data[2] >> 8;
2195                gen_spec->loc_mac[1] = spec->data[2];
2196                gen_spec->loc_mac[2] = spec->data[1] >> 24;
2197                gen_spec->loc_mac[3] = spec->data[1] >> 16;
2198                gen_spec->loc_mac[4] = spec->data[1] >> 8;
2199                gen_spec->loc_mac[5] = spec->data[1];
2200                gen_spec->outer_vid = htons(spec->data[0]);
2201                break;
2202
2203        case EFX_FARCH_FILTER_UC_DEF:
2204        case EFX_FARCH_FILTER_MC_DEF:
2205                gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
2206                gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
2207                break;
2208
2209        default:
2210                WARN_ON(1);
2211                break;
2212        }
2213}
2214
2215static void
2216efx_farch_filter_init_rx_auto(struct efx_nic *efx,
2217                              struct efx_farch_filter_spec *spec)
2218{
2219        /* If there's only one channel then disable RSS for non VF
2220         * traffic, thereby allowing VFs to use RSS when the PF can't.
2221         */
2222        spec->priority = EFX_FILTER_PRI_AUTO;
2223        spec->flags = (EFX_FILTER_FLAG_RX |
2224                       (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
2225                       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2226        spec->dmaq_id = 0;
2227}
2228
2229/* Build a filter entry and return its n-tuple key. */
2230static u32 efx_farch_filter_build(efx_oword_t *filter,
2231                                  struct efx_farch_filter_spec *spec)
2232{
2233        u32 data3;
2234
2235        switch (efx_farch_filter_spec_table_id(spec)) {
2236        case EFX_FARCH_FILTER_TABLE_RX_IP: {
2237                bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
2238                               spec->type == EFX_FARCH_FILTER_UDP_WILD);
2239                EFX_POPULATE_OWORD_7(
2240                        *filter,
2241                        FRF_BZ_RSS_EN,
2242                        !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2243                        FRF_BZ_SCATTER_EN,
2244                        !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2245                        FRF_BZ_TCP_UDP, is_udp,
2246                        FRF_BZ_RXQ_ID, spec->dmaq_id,
2247                        EFX_DWORD_2, spec->data[2],
2248                        EFX_DWORD_1, spec->data[1],
2249                        EFX_DWORD_0, spec->data[0]);
2250                data3 = is_udp;
2251                break;
2252        }
2253
2254        case EFX_FARCH_FILTER_TABLE_RX_MAC: {
2255                bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2256                EFX_POPULATE_OWORD_7(
2257                        *filter,
2258                        FRF_CZ_RMFT_RSS_EN,
2259                        !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2260                        FRF_CZ_RMFT_SCATTER_EN,
2261                        !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2262                        FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2263                        FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2264                        FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2265                        FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2266                        FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2267                data3 = is_wild;
2268                break;
2269        }
2270
2271        case EFX_FARCH_FILTER_TABLE_TX_MAC: {
2272                bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2273                EFX_POPULATE_OWORD_5(*filter,
2274                                     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2275                                     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2276                                     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2277                                     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2278                                     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2279                data3 = is_wild | spec->dmaq_id << 1;
2280                break;
2281        }
2282
2283        default:
2284                BUG();
2285        }
2286
2287        return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2288}
2289
2290static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
2291                                   const struct efx_farch_filter_spec *right)
2292{
2293        if (left->type != right->type ||
2294            memcmp(left->data, right->data, sizeof(left->data)))
2295                return false;
2296
2297        if (left->flags & EFX_FILTER_FLAG_TX &&
2298            left->dmaq_id != right->dmaq_id)
2299                return false;
2300
2301        return true;
2302}
2303
2304/*
2305 * Construct/deconstruct external filter IDs.  At least the RX filter
2306 * IDs must be ordered by matching priority, for RX NFC semantics.
2307 *
2308 * Deconstruction needs to be robust against invalid IDs so that
2309 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
2310 * accept user-provided IDs.
2311 */
2312
2313#define EFX_FARCH_FILTER_MATCH_PRI_COUNT        5
2314
2315static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
2316        [EFX_FARCH_FILTER_TCP_FULL]     = 0,
2317        [EFX_FARCH_FILTER_UDP_FULL]     = 0,
2318        [EFX_FARCH_FILTER_TCP_WILD]     = 1,
2319        [EFX_FARCH_FILTER_UDP_WILD]     = 1,
2320        [EFX_FARCH_FILTER_MAC_FULL]     = 2,
2321        [EFX_FARCH_FILTER_MAC_WILD]     = 3,
2322        [EFX_FARCH_FILTER_UC_DEF]       = 4,
2323        [EFX_FARCH_FILTER_MC_DEF]       = 4,
2324};
2325
2326static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
2327        EFX_FARCH_FILTER_TABLE_RX_IP,   /* RX match pri 0 */
2328        EFX_FARCH_FILTER_TABLE_RX_IP,
2329        EFX_FARCH_FILTER_TABLE_RX_MAC,
2330        EFX_FARCH_FILTER_TABLE_RX_MAC,
2331        EFX_FARCH_FILTER_TABLE_RX_DEF,  /* RX match pri 4 */
2332        EFX_FARCH_FILTER_TABLE_TX_MAC,  /* TX match pri 0 */
2333        EFX_FARCH_FILTER_TABLE_TX_MAC,  /* TX match pri 1 */
2334};
2335
2336#define EFX_FARCH_FILTER_INDEX_WIDTH 13
2337#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
2338
2339static inline u32
2340efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
2341                         unsigned int index)
2342{
2343        unsigned int range;
2344
2345        range = efx_farch_filter_type_match_pri[spec->type];
2346        if (!(spec->flags & EFX_FILTER_FLAG_RX))
2347                range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
2348
2349        return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
2350}
2351
2352static inline enum efx_farch_filter_table_id
2353efx_farch_filter_id_table_id(u32 id)
2354{
2355        unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
2356
2357        if (range < ARRAY_SIZE(efx_farch_filter_range_table))
2358                return efx_farch_filter_range_table[range];
2359        else
2360                return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
2361}
2362
2363static inline unsigned int efx_farch_filter_id_index(u32 id)
2364{
2365        return id & EFX_FARCH_FILTER_INDEX_MASK;
2366}
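
/* Editor's note: worked example of the ID scheme above: an RX TCP wildcard
 * filter has match priority 1, so entry 5 of the RX IP table maps to
 * id = 1 << 13 | 5 = 8197; decoding takes range 8197 >> 13 = 1 back to
 * EFX_FARCH_FILTER_TABLE_RX_IP and index 8197 & 0x1fff = 5.
 */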
2367
2368u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
2369{
2370        struct efx_farch_filter_state *state = efx->filter_state;
2371        unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2372        enum efx_farch_filter_table_id table_id;
2373
2374        do {
2375                table_id = efx_farch_filter_range_table[range];
2376                if (state->table[table_id].size != 0)
2377                        return range << EFX_FARCH_FILTER_INDEX_WIDTH |
2378                                state->table[table_id].size;
2379        } while (range--);
2380
2381        return 0;
2382}
2383
2384s32 efx_farch_filter_insert(struct efx_nic *efx,
2385                            struct efx_filter_spec *gen_spec,
2386                            bool replace_equal)
2387{
2388        struct efx_farch_filter_state *state = efx->filter_state;
2389        struct efx_farch_filter_table *table;
2390        struct efx_farch_filter_spec spec;
2391        efx_oword_t filter;
2392        int rep_index, ins_index;
2393        unsigned int depth = 0;
2394        int rc;
2395
2396        rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
2397        if (rc)
2398                return rc;
2399
2400        down_write(&state->lock);
2401
2402        table = &state->table[efx_farch_filter_spec_table_id(&spec)];
2403        if (table->size == 0) {
2404                rc = -EINVAL;
2405                goto out_unlock;
2406        }
2407
2408        netif_vdbg(efx, hw, efx->net_dev,
2409                   "%s: type %d search_limit=%d", __func__, spec.type,
2410                   table->search_limit[spec.type]);
2411
2412        if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2413                /* One filter spec per type */
2414                BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
2415                BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
2416                             EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
2417                rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
2418                ins_index = rep_index;
2419        } else {
2420                /* Search concurrently for
2421                 * (1) a filter to be replaced (rep_index): any filter
2422                 *     with the same match values, up to the current
2423                 *     search depth for this type, and
2424                 * (2) the insertion point (ins_index): (1) or any
2425                 *     free slot before it or up to the maximum search
2426                 *     depth for this priority
2427                 * We fail if we cannot find (2).
2428                 *
2429                 * We can stop once either
2430                 * (a) we find (1), in which case we have definitely
2431                 *     found (2) as well; or
2432                 * (b) we have searched exhaustively for (1), and have
2433                 *     either found (2) or searched exhaustively for it
2434                 */
2435                u32 key = efx_farch_filter_build(&filter, &spec);
2436                unsigned int hash = efx_farch_filter_hash(key);
2437                unsigned int incr = efx_farch_filter_increment(key);
2438                unsigned int max_rep_depth = table->search_limit[spec.type];
2439                unsigned int max_ins_depth =
2440                        spec.priority <= EFX_FILTER_PRI_HINT ?
2441                        EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2442                        EFX_FARCH_FILTER_CTL_SRCH_MAX;
2443                unsigned int i = hash & (table->size - 1);
2444
2445                ins_index = -1;
2446                depth = 1;
2447
2448                for (;;) {
2449                        if (!test_bit(i, table->used_bitmap)) {
2450                                if (ins_index < 0)
2451                                        ins_index = i;
2452                        } else if (efx_farch_filter_equal(&spec,
2453                                                          &table->spec[i])) {
2454                                /* Case (a) */
2455                                if (ins_index < 0)
2456                                        ins_index = i;
2457                                rep_index = i;
2458                                break;
2459                        }
2460
2461                        if (depth >= max_rep_depth &&
2462                            (ins_index >= 0 || depth >= max_ins_depth)) {
2463                                /* Case (b) */
2464                                if (ins_index < 0) {
2465                                        rc = -EBUSY;
2466                                        goto out_unlock;
2467                                }
2468                                rep_index = -1;
2469                                break;
2470                        }
2471
2472                        i = (i + incr) & (table->size - 1);
2473                        ++depth;
2474                }
2475        }
2476
2477        /* If we found a filter to be replaced, check whether we
2478         * should do so
2479         */
2480        if (rep_index >= 0) {
2481                struct efx_farch_filter_spec *saved_spec =
2482                        &table->spec[rep_index];
2483
2484                if (spec.priority == saved_spec->priority && !replace_equal) {
2485                        rc = -EEXIST;
2486                        goto out_unlock;
2487                }
2488                if (spec.priority < saved_spec->priority) {
2489                        rc = -EPERM;
2490                        goto out_unlock;
2491                }
2492                if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
2493                    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
2494                        spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
2495        }
2496
2497        /* Insert the filter */
2498        if (ins_index != rep_index) {
2499                __set_bit(ins_index, table->used_bitmap);
2500                ++table->used;
2501        }
2502        table->spec[ins_index] = spec;
2503
2504        if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2505                efx_farch_filter_push_rx_config(efx);
2506        } else {
2507                if (table->search_limit[spec.type] < depth) {
2508                        table->search_limit[spec.type] = depth;
2509                        if (spec.flags & EFX_FILTER_FLAG_TX)
2510                                efx_farch_filter_push_tx_limits(efx);
2511                        else
2512                                efx_farch_filter_push_rx_config(efx);
2513                }
2514
2515                efx_writeo(efx, &filter,
2516                           table->offset + table->step * ins_index);
2517
2518                /* If we were able to replace a filter by inserting
2519                 * at a lower depth, clear the replaced filter
2520                 */
2521                if (ins_index != rep_index && rep_index >= 0)
2522                        efx_farch_filter_table_clear_entry(efx, table,
2523                                                           rep_index);
2524        }
2525
2526        netif_vdbg(efx, hw, efx->net_dev,
2527                   "%s: filter type %d index %d rxq %u set",
2528                   __func__, spec.type, ins_index, spec.dmaq_id);
2529        rc = efx_farch_filter_make_id(&spec, ins_index);
2530
2531out_unlock:
2532        up_write(&state->lock);
2533        return rc;
2534}
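
/* Editor's note: a minimal usage sketch for the insert path above.  It
 * assumes the generic efx_filter_init_rx()/efx_filter_set_ipv4_local()
 * helpers from the driver's filter.h; the function name and the queue and
 * address values are illustrative only.
 */
static int example_insert_tcp_filter(struct efx_nic *efx)
{
        struct efx_filter_spec spec;

        /* Steer TCP traffic to local 10.0.0.2:80 into RX queue 2 */
        efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 2);
        efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
                                  htonl(0x0a000002), htons(80));

        /* Returns the external filter ID on success or a negative error */
        return efx_farch_filter_insert(efx, &spec, false);
}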
2535
2536static void
2537efx_farch_filter_table_clear_entry(struct efx_nic *efx,
2538                                   struct efx_farch_filter_table *table,
2539                                   unsigned int filter_idx)
2540{
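        /* filter has static storage, hence is zero-initialised: writing
         * it below pushes an all-zeroes entry to clear the hardware slot.
         */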
2541        static efx_oword_t filter;
2542
2543        EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2544        BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2545
2546        __clear_bit(filter_idx, table->used_bitmap);
2547        --table->used;
2548        memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2549
2550        efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
2551
2552        /* If this filter required a greater search depth than
2553         * any other, the search limit for its type can now be
2554         * decreased.  However, it is hard to determine that
2555         * unless the table has become completely empty - in
2556         * which case, all its search limits can be set to 0.
2557         */
2558        if (unlikely(table->used == 0)) {
2559                memset(table->search_limit, 0, sizeof(table->search_limit));
2560                if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
2561                        efx_farch_filter_push_tx_limits(efx);
2562                else
2563                        efx_farch_filter_push_rx_config(efx);
2564        }
2565}
2566
2567static int efx_farch_filter_remove(struct efx_nic *efx,
2568                                   struct efx_farch_filter_table *table,
2569                                   unsigned int filter_idx,
2570                                   enum efx_filter_priority priority)
2571{
2572        struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
2573
2574        if (!test_bit(filter_idx, table->used_bitmap) ||
2575            spec->priority != priority)
2576                return -ENOENT;
2577
2578        if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
2579                efx_farch_filter_init_rx_auto(efx, spec);
2580                efx_farch_filter_push_rx_config(efx);
2581        } else {
2582                efx_farch_filter_table_clear_entry(efx, table, filter_idx);
2583        }
2584
2585        return 0;
2586}
2587
2588int efx_farch_filter_remove_safe(struct efx_nic *efx,
2589                                 enum efx_filter_priority priority,
2590                                 u32 filter_id)
2591{
2592        struct efx_farch_filter_state *state = efx->filter_state;
2593        enum efx_farch_filter_table_id table_id;
2594        struct efx_farch_filter_table *table;
2595        unsigned int filter_idx;
2596        int rc;
2597
2598        table_id = efx_farch_filter_id_table_id(filter_id);
2599        if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2600                return -ENOENT;
2601        table = &state->table[table_id];
2602
2603        filter_idx = efx_farch_filter_id_index(filter_id);
2604        if (filter_idx >= table->size)
2605                return -ENOENT;
2606        down_write(&state->lock);
2607
2608        rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
2609        up_write(&state->lock);
2610
2611        return rc;
2612}
2613
2614int efx_farch_filter_get_safe(struct efx_nic *efx,
2615                              enum efx_filter_priority priority,
2616                              u32 filter_id, struct efx_filter_spec *spec_buf)
2617{
2618        struct efx_farch_filter_state *state = efx->filter_state;
2619        enum efx_farch_filter_table_id table_id;
2620        struct efx_farch_filter_table *table;
2621        struct efx_farch_filter_spec *spec;
2622        unsigned int filter_idx;
2623        int rc = -ENOENT;
2624
2625        down_read(&state->lock);
2626
2627        table_id = efx_farch_filter_id_table_id(filter_id);
2628        if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2629                goto out_unlock;
2630        table = &state->table[table_id];
2631
2632        filter_idx = efx_farch_filter_id_index(filter_id);
2633        if (filter_idx >= table->size)
2634                goto out_unlock;
2635        spec = &table->spec[filter_idx];
2636
2637        if (test_bit(filter_idx, table->used_bitmap) &&
2638            spec->priority == priority) {
2639                efx_farch_filter_to_gen_spec(spec_buf, spec);
2640                rc = 0;
2641        }
2642
2643out_unlock:
2644        up_read(&state->lock);
2645        return rc;
2646}
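    /* Illustrative caller-side sketch (hypothetical context): reading back
     * a previously installed manual filter by its ID.
     *
     *	struct efx_filter_spec spec;
     *	int rc;
     *
     *	rc = efx_farch_filter_get_safe(efx, EFX_FILTER_PRI_MANUAL,
     *				       filter_id, &spec);
     *	if (rc)		// -ENOENT: no such filter at this priority
     *		return rc;
     *
     * The priority must match exactly; the same ID at another priority is
     * treated as absent.
     */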
2647
2648static void
2649efx_farch_filter_table_clear(struct efx_nic *efx,
2650                             enum efx_farch_filter_table_id table_id,
2651                             enum efx_filter_priority priority)
2652{
2653        struct efx_farch_filter_state *state = efx->filter_state;
2654        struct efx_farch_filter_table *table = &state->table[table_id];
2655        unsigned int filter_idx;
2656
2657        down_write(&state->lock);
2658        for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
2659                if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
2660                        efx_farch_filter_remove(efx, table,
2661                                                filter_idx, priority);
2662        }
2663        up_write(&state->lock);
2664}
2665
2666int efx_farch_filter_clear_rx(struct efx_nic *efx,
2667                               enum efx_filter_priority priority)
2668{
2669        efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
2670                                     priority);
2671        efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
2672                                     priority);
2673        efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
2674                                     priority);
2675        return 0;
2676}
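    /* Illustrative usage (a sketch): flushing every manually inserted RX
     * filter walks all three RX tables - IP match, MAC match and default:
     *
     *	efx_farch_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
     *
     * Entries at other priorities survive, and EFX_FILTER_PRI_AUTO entries
     * are explicitly skipped by the per-table walk above.
     */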
2677
2678u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
2679                                   enum efx_filter_priority priority)
2680{
2681        struct efx_farch_filter_state *state = efx->filter_state;
2682        enum efx_farch_filter_table_id table_id;
2683        struct efx_farch_filter_table *table;
2684        unsigned int filter_idx;
2685        u32 count = 0;
2686
2687        down_read(&state->lock);
2688
2689        for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2690             table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2691             table_id++) {
2692                table = &state->table[table_id];
2693                for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2694                        if (test_bit(filter_idx, table->used_bitmap) &&
2695                            table->spec[filter_idx].priority == priority)
2696                                ++count;
2697                }
2698        }
2699
2700        up_read(&state->lock);
2701
2702        return count;
2703}
2704
2705s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
2706                                enum efx_filter_priority priority,
2707                                u32 *buf, u32 size)
2708{
2709        struct efx_farch_filter_state *state = efx->filter_state;
2710        enum efx_farch_filter_table_id table_id;
2711        struct efx_farch_filter_table *table;
2712        unsigned int filter_idx;
2713        s32 count = 0;
2714
2715        down_read(&state->lock);
2716
2717        for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2718             table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2719             table_id++) {
2720                table = &state->table[table_id];
2721                for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2722                        if (test_bit(filter_idx, table->used_bitmap) &&
2723                            table->spec[filter_idx].priority == priority) {
2724                                if (count == size) {
2725                                        count = -EMSGSIZE;
2726                                        goto out;
2727                                }
2728                                buf[count++] = efx_farch_filter_make_id(
2729                                        &table->spec[filter_idx], filter_idx);
2730                        }
2731                }
2732        }
2733out:
2734        up_read(&state->lock);
2735
2736        return count;
2737}
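    /* Caller-side sketch of the usual size-then-fetch pattern (hypothetical
     * context, sleeping allocation assumed):
     *
     *	u32 n = efx_farch_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
     *	u32 *ids = kcalloc(n, sizeof(*ids), GFP_KERNEL);
     *	s32 got;
     *
     *	if (!ids)
     *		return -ENOMEM;
     *	got = efx_farch_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
     *					  ids, n);
     *
     * The lock is dropped between the two calls, so a filter inserted in
     * that window can still make the fetch fail with -EMSGSIZE; callers
     * must be prepared to retry with a larger buffer.
     */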
2738
2739/* Restore filter state after reset */
2740void efx_farch_filter_table_restore(struct efx_nic *efx)
2741{
2742        struct efx_farch_filter_state *state = efx->filter_state;
2743        enum efx_farch_filter_table_id table_id;
2744        struct efx_farch_filter_table *table;
2745        efx_oword_t filter;
2746        unsigned int filter_idx;
2747
2748        down_write(&state->lock);
2749
2750        for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2751                table = &state->table[table_id];
2752
2753                /* Skip tables with no per-row registers to rewrite */
2754                if (table->step == 0)
2755                        continue;
2756
2757                for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2758                        if (!test_bit(filter_idx, table->used_bitmap))
2759                                continue;
2760                        efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2761                        efx_writeo(efx, &filter,
2762                                   table->offset + table->step * filter_idx);
2763                }
2764        }
2765
2766        efx_farch_filter_push_rx_config(efx);
2767        efx_farch_filter_push_tx_limits(efx);
2768
2769        up_write(&state->lock);
2770}
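    /* Why step == 0 is skipped above: the RX default filters have no
     * per-row registers - their table is probed below with offset and step
     * left at zero - so after a reset their state is reinstated wholesale
     * by efx_farch_filter_push_rx_config() rather than by rewriting rows.
     */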
2771
2772void efx_farch_filter_table_remove(struct efx_nic *efx)
2773{
2774        struct efx_farch_filter_state *state = efx->filter_state;
2775        enum efx_farch_filter_table_id table_id;
2776
2777        for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2778                kfree(state->table[table_id].used_bitmap);
2779                vfree(state->table[table_id].spec);
2780        }
2781        kfree(state);
2782}
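    /* Teardown mirrors the allocations made in the probe function below; a
     * sketch of the pairing, since the two allocator families must not be
     * mixed:
     *
     *	table->used_bitmap = kcalloc(...);	// freed with kfree()
     *	table->spec        = vzalloc(...);	// freed with vfree()
     *	state              = kzalloc(...);	// freed with kfree()
     *
     * vfree() on a kmalloc'd pointer (or vice versa) corrupts the
     * allocators, so the free calls above are not interchangeable.
     */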
2783
2784int efx_farch_filter_table_probe(struct efx_nic *efx)
2785{
2786        struct efx_farch_filter_state *state;
2787        struct efx_farch_filter_table *table;
2788        unsigned int table_id;
2789
2790        state = kzalloc(sizeof(*state), GFP_KERNEL);
2791        if (!state)
2792                return -ENOMEM;
2793        efx->filter_state = state;
2794        init_rwsem(&state->lock);
2795
2796        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2797        table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
2798        table->offset = FR_BZ_RX_FILTER_TBL0;
2799        table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2800        table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2801
2802        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
2803        table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
2804        table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
2805        table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
2806        table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
2807
2808        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2809        table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
2810        table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
2811
2812        table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2813        table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
2814        table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
2815        table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
2816        table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
2817
2818        for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2819                table = &state->table[table_id];
2820                if (table->size == 0)
2821                        continue;
2822                table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
2823                                             sizeof(unsigned long),
2824                                             GFP_KERNEL);
2825                if (!table->used_bitmap)
2826                        goto fail;
2827                table->spec = vzalloc(array_size(sizeof(*table->spec),
2828                                                 table->size));
2829                if (!table->spec)
2830                        goto fail;
2831        }
2832
2833        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2834        if (table->size) {
2835                /* RX default filters must always exist */
2836                struct efx_farch_filter_spec *spec;
2837                unsigned int i;
2838
2839                for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
2840                        spec = &table->spec[i];
2841                        spec->type = EFX_FARCH_FILTER_UC_DEF + i;
2842                        efx_farch_filter_init_rx_auto(efx, spec);
2843                        __set_bit(i, table->used_bitmap);
2844                }
2845        }
2846
2847        efx_farch_filter_push_rx_config(efx);
2848
2849        return 0;
2850
2851fail:
2852        efx_farch_filter_table_remove(efx);
2853        return -ENOMEM;
2854}
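    /* A sizing sketch for the per-table allocations above, assuming a
     * hypothetical 8192-row table on a 64-bit kernel:
     *
     *	BITS_TO_LONGS(8192) = 8192 / 64 = 128 longs	// 1 KiB, kcalloc
     *	8192 * sizeof(struct efx_farch_filter_spec)	// spec array, vzalloc
     *
     * The spec array uses vmalloc-backed storage because an allocation of
     * that size may not be available physically contiguous at probe time;
     * the small bitmap is safe to kcalloc.
     */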
2855
2856/* Update scatter enable flags for filters pointing to our own RX queues */
2857void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
2858{
2859        struct efx_farch_filter_state *state = efx->filter_state;
2860        enum efx_farch_filter_table_id table_id;
2861        struct efx_farch_filter_table *table;
2862        efx_oword_t filter;
2863        unsigned int filter_idx;
2864
2865        down_write(&state->lock);
2866
2867        for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2868             table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2869             table_id++) {
2870                table = &state->table[table_id];
2871
2872                for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2873                        if (!test_bit(filter_idx, table->used_bitmap) ||
2874                            table->spec[filter_idx].dmaq_id >=
2875                            efx->n_rx_channels)
2876                                continue;
2877
2878                        if (efx->rx_scatter)
2879                                table->spec[filter_idx].flags |=
2880                                        EFX_FILTER_FLAG_RX_SCATTER;
2881                        else
2882                                table->spec[filter_idx].flags &=
2883                                        ~EFX_FILTER_FLAG_RX_SCATTER;
2884
2885                        if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
2886                                /* Pushed by efx_farch_filter_push_rx_config() */
2887                                continue;
2888
2889                        efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2890                        efx_writeo(efx, &filter,
2891                                   table->offset + table->step * filter_idx);
2892                }
2893        }
2894
2895        efx_farch_filter_push_rx_config(efx);
2896
2897        up_write(&state->lock);
2898}
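    /* Note on the queue check above: a dmaq_id at or beyond
     * efx->n_rx_channels steers to a queue that is not one of our own RX
     * queues (e.g. one assigned to an SR-IOV function), so its scatter flag
     * is left untouched.  The update itself is the usual set/clear idiom:
     *
     *	if (efx->rx_scatter)
     *		spec->flags |= EFX_FILTER_FLAG_RX_SCATTER;
     *	else
     *		spec->flags &= ~EFX_FILTER_FLAG_RX_SCATTER;
     */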
2899
2900#ifdef CONFIG_RFS_ACCEL
2901
2902bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2903                                     unsigned int index)
2904{
2905        struct efx_farch_filter_state *state = efx->filter_state;
2906        struct efx_farch_filter_table *table;
2907        bool ret = false, force = false;
2908        u16 arfs_id;
2909
2910        down_write(&state->lock);
2911        spin_lock_bh(&efx->rps_hash_lock);
2912        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2913        if (test_bit(index, table->used_bitmap) &&
2914            table->spec[index].priority == EFX_FILTER_PRI_HINT) {
2915                struct efx_arfs_rule *rule = NULL;
2916                struct efx_filter_spec spec;
2917
2918                efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
2919                if (!efx->rps_hash_table) {
2920                        /* Without the hash table we always reported
2921                         * arfs_id 0 to ARFS, so query it with the same value.
2922                         */
2923                        arfs_id = 0;
2924                } else {
2925                        rule = efx_rps_hash_find(efx, &spec);
2926                        if (!rule) {
2927                                /* ARFS table doesn't know of this filter, remove it */
2928                                force = true;
2929                        } else {
2930                                arfs_id = rule->arfs_id;
2931                                if (!efx_rps_check_rule(rule, index, &force))
2932                                        goto out_unlock;
2933                        }
2934                }
2935                if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
2936                                                 flow_id, arfs_id)) {
2937                        if (rule)
2938                                rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
2939                        efx_rps_hash_del(efx, &spec);
2940                        efx_farch_filter_table_clear_entry(efx, table, index);
2941                        ret = true;
2942                }
2943        }
2944out_unlock:
2945        spin_unlock_bh(&efx->rps_hash_lock);
2946        up_write(&state->lock);
2947        return ret;
2948}
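    /* Sketch of the expiry decision above: rps_may_expire_flow() is the
     * core RPS helper reporting whether any CPU still wants the steering
     * hint identified by (flow_id, arfs_id), so the removal test reduces to
     *
     *	expire = force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
     *					      flow_id, arfs_id);
     *
     * where 'force' covers filters the ARFS bookkeeping no longer
     * recognises.  The rwsem is taken before the rps_hash_lock spinlock;
     * any path taking both must use the same order to avoid deadlock.
     */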
2949
2950#endif /* CONFIG_RFS_ACCEL */
2951
2952void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
2953{
2954        struct net_device *net_dev = efx->net_dev;
2955        struct netdev_hw_addr *ha;
2956        union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2957        u32 crc;
2958        int bit;
2959
2960        if (!efx_dev_registered(efx))
2961                return;
2962
2963        netif_addr_lock_bh(net_dev);
2964
2965        efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2966
2967        /* Build multicast hash table */
2968        if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2969                memset(mc_hash, 0xff, sizeof(*mc_hash));
2970        } else {
2971                memset(mc_hash, 0x00, sizeof(*mc_hash));
2972                netdev_for_each_mc_addr(ha, net_dev) {
2973                        crc = ether_crc_le(ETH_ALEN, ha->addr);
2974                        bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2975                        __set_bit_le(bit, mc_hash);
2976                }
2977
2978                /* Broadcast packets go through the multicast hash filter.
2979                 * ether_crc_le() of the broadcast address is 0xbe2612ff
2980                 * so we always add bit 0xff to the mask.
2981                 */
2982                __set_bit_le(0xff, mc_hash);
2983        }
2984
2985        netif_addr_unlock_bh(net_dev);
2986}
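    /* A worked example of the hash mapping above (illustrative address):
     * the multicast filter is a 256-bucket hash indexed by the low eight
     * bits of the little-endian CRC-32 of the destination MAC.  For the
     * all-hosts IPv4 multicast group 224.0.0.1:
     *
     *	static const u8 mac[ETH_ALEN] = {
     *		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
     *	u32 crc = ether_crc_le(ETH_ALEN, mac);
     *	int bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);	// 0..255
     *
     * Broadcast frames hash to bit 0xff (CRC 0xbe2612ff), which is why
     * that bit is always set so broadcasts pass the multicast filter.
     */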
2987