linux/drivers/net/ethernet/qlogic/qed/qed_int.c
   1/* QLogic qed NIC Driver
   2 * Copyright (c) 2015-2017  QLogic Corporation
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
   20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/types.h>
  34#include <asm/byteorder.h>
  35#include <linux/io.h>
  36#include <linux/bitops.h>
  37#include <linux/delay.h>
  38#include <linux/dma-mapping.h>
  39#include <linux/errno.h>
  40#include <linux/interrupt.h>
  41#include <linux/kernel.h>
  42#include <linux/pci.h>
  43#include <linux/slab.h>
  44#include <linux/string.h>
  45#include "qed.h"
  46#include "qed_hsi.h"
  47#include "qed_hw.h"
  48#include "qed_init_ops.h"
  49#include "qed_int.h"
  50#include "qed_mcp.h"
  51#include "qed_reg_addr.h"
  52#include "qed_sp.h"
  53#include "qed_sriov.h"
  54#include "qed_vf.h"
  55
  56struct qed_pi_info {
  57        qed_int_comp_cb_t       comp_cb;
  58        void                    *cookie;
  59};
  60
  61struct qed_sb_sp_info {
  62        struct qed_sb_info      sb_info;
  63
  64        /* per protocol index data */
  65        struct qed_pi_info      pi_info_arr[PIS_PER_SB];
  66};
  67
  68enum qed_attention_type {
  69        QED_ATTN_TYPE_ATTN,
  70        QED_ATTN_TYPE_PARITY,
  71};
  72
  73#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
  74        ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
  75
  76struct aeu_invert_reg_bit {
  77        char bit_name[30];
  78
  79#define ATTENTION_PARITY                (1 << 0)
  80
  81#define ATTENTION_LENGTH_MASK           (0x00000ff0)
  82#define ATTENTION_LENGTH_SHIFT          (4)
  83#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
  84                                         ATTENTION_LENGTH_SHIFT)
  85#define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
  86#define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
  87#define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
  88                                         ATTENTION_PARITY)
  89
  90/* Multiple bits start with this offset */
  91#define ATTENTION_OFFSET_MASK           (0x000ff000)
  92#define ATTENTION_OFFSET_SHIFT          (12)
  93
  94#define ATTENTION_BB_MASK               (0x00700000)
  95#define ATTENTION_BB_SHIFT              (20)
   96#define ATTENTION_BB(value)             ((value) << ATTENTION_BB_SHIFT)
  97#define ATTENTION_BB_DIFFERENT          BIT(23)
  98
  99        unsigned int flags;
 100
  101        /* Callback to call if the attention is triggered */
 102        int (*cb)(struct qed_hwfn *p_hwfn);
 103
 104        enum block_id block_index;
 105};
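
/* Illustration of the 'flags' encoding above (derived from the macros in
 * struct aeu_invert_reg_bit):
 *
 *   ATTENTION_SINGLE  = (1 << 4)     = 0x10 -> one non-parity source bit
 *   ATTENTION_PAR     = 0x10 | 0x01  = 0x11 -> one source bit, parity type
 *   ATTENTION_PAR_INT = (2 << 4) | 1 = 0x21 -> two bits, a parity bit
 *                                              followed by an interrupt bit
 *
 * ATTENTION_LENGTH(0x21) = (0x21 & 0xff0) >> 4 = 2, i.e. the descriptor
 * consumes two consecutive AEU bits. Wider sources such as "SW timers #%d"
 * below use (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT)
 * = 0x1080: eight bits wide, with a starting offset of 1 recorded in the
 * ATTENTION_OFFSET field ("Multiple bits start with this offset").
 */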
 106
 107struct aeu_invert_reg {
 108        struct aeu_invert_reg_bit bits[32];
 109};
 110
 111#define MAX_ATTN_GRPS           (8)
 112#define NUM_ATTN_REGS           (9)
 113
 114/* Specific HW attention callbacks */
 115static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
 116{
 117        u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
 118
  119        /* This might occur on certain instances; log it once and then mask it */
 120        DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
 121                tmp);
 122        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
 123               0xffffffff);
 124
 125        return 0;
 126}
 127
 128#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS           (0x1)
 129#define ATTENTION_INCORRECT_ACCESS_WR_MASK              (0x1)
 130#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT             (0)
 131#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK          (0xf)
 132#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT         (1)
 133#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK        (0x1)
 134#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT       (5)
 135#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK           (0xff)
 136#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT          (6)
 137#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK           (0xf)
 138#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT          (14)
 139#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK         (0xff)
 140#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT        (18)
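/* The _MASK/_SHIFT pairs above are consumed via GET_FIELD(), which in this
 * driver is expected to expand roughly to
 * ((value) >> NAME##_SHIFT) & NAME##_MASK (see qed.h). For instance, a
 * details word of 0x00004002 decodes as WR = (0x4002 >> 0) & 0x1 = 0,
 * CLIENT = (0x4002 >> 1) & 0xf = 1 and PF_ID = (0x4002 >> 14) & 0xf = 1.
 */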
 141static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
 142{
 143        u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 144                         PSWHST_REG_INCORRECT_ACCESS_VALID);
 145
 146        if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
 147                u32 addr, data, length;
 148
 149                addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 150                              PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
 151                data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 152                              PSWHST_REG_INCORRECT_ACCESS_DATA);
 153                length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 154                                PSWHST_REG_INCORRECT_ACCESS_LENGTH);
 155
 156                DP_INFO(p_hwfn->cdev,
 157                        "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
 158                        addr, length,
 159                        (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
 160                        (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
 161                        (u8) GET_FIELD(data,
 162                                       ATTENTION_INCORRECT_ACCESS_VF_VALID),
 163                        (u8) GET_FIELD(data,
 164                                       ATTENTION_INCORRECT_ACCESS_CLIENT),
 165                        (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
 166                        (u8) GET_FIELD(data,
 167                                       ATTENTION_INCORRECT_ACCESS_BYTE_EN),
 168                        data);
 169        }
 170
 171        return 0;
 172}
 173
 174#define QED_GRC_ATTENTION_VALID_BIT     (1 << 0)
 175#define QED_GRC_ATTENTION_ADDRESS_MASK  (0x7fffff)
 176#define QED_GRC_ATTENTION_ADDRESS_SHIFT (0)
 177#define QED_GRC_ATTENTION_RDWR_BIT      (1 << 23)
 178#define QED_GRC_ATTENTION_MASTER_MASK   (0xf)
 179#define QED_GRC_ATTENTION_MASTER_SHIFT  (24)
 180#define QED_GRC_ATTENTION_PF_MASK       (0xf)
 181#define QED_GRC_ATTENTION_PF_SHIFT      (0)
 182#define QED_GRC_ATTENTION_VF_MASK       (0xff)
 183#define QED_GRC_ATTENTION_VF_SHIFT      (4)
 184#define QED_GRC_ATTENTION_PRIV_MASK     (0x3)
 185#define QED_GRC_ATTENTION_PRIV_SHIFT    (14)
 186#define QED_GRC_ATTENTION_PRIV_VF       (0)
 187static const char *attn_master_to_str(u8 master)
 188{
 189        switch (master) {
 190        case 1: return "PXP";
 191        case 2: return "MCP";
 192        case 3: return "MSDM";
 193        case 4: return "PSDM";
 194        case 5: return "YSDM";
 195        case 6: return "USDM";
 196        case 7: return "TSDM";
 197        case 8: return "XSDM";
 198        case 9: return "DBU";
 199        case 10: return "DMAE";
 200        default:
 201                return "Unknown";
 202        }
 203}
 204
 205static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
 206{
 207        u32 tmp, tmp2;
 208
 209        /* We've already cleared the timeout interrupt register, so we learn
 210         * of interrupts via the validity register
 211         */
 212        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 213                     GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
 214        if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
 215                goto out;
 216
 217        /* Read the GRC timeout information */
 218        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 219                     GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
 220        tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 221                      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
 222
 223        DP_INFO(p_hwfn->cdev,
 224                "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
 225                tmp2, tmp,
 226                (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
 227                GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
 228                attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
 229                GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
 230                (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
  231                 QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
 232                GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
 233
 234out:
  235        /* Regardless of anything else, clear the validity bit */
 236        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
 237               GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
 238        return 0;
 239}
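
/* Worked example of the GRC timeout decode above: a DATA_0 value of
 * 0x01000123 has the RDWR bit (1 << 23) clear, so it was a read; the
 * address field is 0x123, printed as the byte address 0x123 << 2 = 0x48c;
 * and the master field is (0x01000123 >> 24) & 0xf = 1, i.e. "PXP" per
 * attn_master_to_str().
 */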
 240
 241#define PGLUE_ATTENTION_VALID                   (1 << 29)
 242#define PGLUE_ATTENTION_RD_VALID                (1 << 26)
 243#define PGLUE_ATTENTION_DETAILS_PFID_MASK       (0xf)
 244#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT      (20)
 245#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK   (0x1)
 246#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT  (19)
 247#define PGLUE_ATTENTION_DETAILS_VFID_MASK       (0xff)
 248#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT      (24)
 249#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK   (0x1)
 250#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT  (21)
 251#define PGLUE_ATTENTION_DETAILS2_BME_MASK       (0x1)
 252#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT      (22)
 253#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK    (0x1)
 254#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT   (23)
 255#define PGLUE_ATTENTION_ICPL_VALID              (1 << 23)
 256#define PGLUE_ATTENTION_ZLR_VALID               (1 << 25)
 257#define PGLUE_ATTENTION_ILT_VALID               (1 << 23)
 258static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
 259{
 260        u32 tmp;
 261
 262        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 263                     PGLUE_B_REG_TX_ERR_WR_DETAILS2);
 264        if (tmp & PGLUE_ATTENTION_VALID) {
 265                u32 addr_lo, addr_hi, details;
 266
 267                addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 268                                 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
 269                addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 270                                 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
 271                details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 272                                 PGLUE_B_REG_TX_ERR_WR_DETAILS);
 273
 274                DP_INFO(p_hwfn,
 275                        "Illegal write by chip to [%08x:%08x] blocked.\n"
 276                        "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
 277                        "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
 278                        addr_hi, addr_lo, details,
 279                        (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
 280                        (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
 281                        GET_FIELD(details,
 282                                  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
 283                        tmp,
 284                        GET_FIELD(tmp,
 285                                  PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
 286                        GET_FIELD(tmp,
 287                                  PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
 288                        GET_FIELD(tmp,
 289                                  PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
 290        }
 291
 292        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 293                     PGLUE_B_REG_TX_ERR_RD_DETAILS2);
 294        if (tmp & PGLUE_ATTENTION_RD_VALID) {
 295                u32 addr_lo, addr_hi, details;
 296
 297                addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 298                                 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
 299                addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 300                                 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
 301                details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 302                                 PGLUE_B_REG_TX_ERR_RD_DETAILS);
 303
 304                DP_INFO(p_hwfn,
 305                        "Illegal read by chip from [%08x:%08x] blocked.\n"
 306                        " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
 307                        " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
 308                        addr_hi, addr_lo, details,
 309                        (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
 310                        (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
 311                        GET_FIELD(details,
 312                                  PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
 313                        tmp,
 314                        GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
 315                                                                         : 0,
 316                        GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
 317                        GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
 318                                                                        : 0);
 319        }
 320
 321        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 322                     PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
 323        if (tmp & PGLUE_ATTENTION_ICPL_VALID)
  324                DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
 325
 326        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 327                     PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
 328        if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
 329                u32 addr_hi, addr_lo;
 330
 331                addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 332                                 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
 333                addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 334                                 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
 335
  336                DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
 337                        tmp, addr_hi, addr_lo);
 338        }
 339
 340        tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 341                     PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
 342        if (tmp & PGLUE_ATTENTION_ILT_VALID) {
 343                u32 addr_hi, addr_lo, details;
 344
 345                addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 346                                 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
 347                addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 348                                 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
 349                details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 350                                 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
 351
 352                DP_INFO(p_hwfn,
 353                        "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
 354                        details, tmp, addr_hi, addr_lo);
 355        }
 356
 357        /* Clear the indications */
 358        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
 359               PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
 360
 361        return 0;
 362}
 363
 364#define QED_DORQ_ATTENTION_REASON_MASK  (0xfffff)
 365#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
 366#define QED_DORQ_ATTENTION_SIZE_MASK    (0x7f)
 367#define QED_DORQ_ATTENTION_SIZE_SHIFT   (16)
 368static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
 369{
 370        u32 reason;
 371
 372        reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
 373                        QED_DORQ_ATTENTION_REASON_MASK;
 374        if (reason) {
 375                u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 376                                     DORQ_REG_DB_DROP_DETAILS);
 377
 378                DP_INFO(p_hwfn->cdev,
 379                        "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
 380                        qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 381                               DORQ_REG_DB_DROP_DETAILS_ADDRESS),
 382                        (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
 383                        GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
 384                        reason);
 385        }
 386
 387        return -EINVAL;
 388}
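
/* Worked example of the doorbell-drop decode above: a DETAILS word of
 * 0x00481234 yields an opaque FID of 0x1234 (low 16 bits) and a size field
 * of (0x00481234 >> 16) & 0x7f = 0x48, logged as 0x48 * 4 = 0x120 since
 * the raw field apparently counts 4-byte units.
 */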
 389
  390/* Instead of major changes to the data-structure, we have some 'special'
 391 * identifiers for sources that changed meaning between adapters.
 392 */
 393enum aeu_invert_reg_special_type {
 394        AEU_INVERT_REG_SPECIAL_CNIG_0,
 395        AEU_INVERT_REG_SPECIAL_CNIG_1,
 396        AEU_INVERT_REG_SPECIAL_CNIG_2,
 397        AEU_INVERT_REG_SPECIAL_CNIG_3,
 398        AEU_INVERT_REG_SPECIAL_MAX,
 399};
 400
 401static struct aeu_invert_reg_bit
 402aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
 403        {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
 404        {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
 405        {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
 406        {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
 407};
 408
  409/* Notice aeu_invert_reg must be defined in the same bit order as the HW. */
 410static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
 411        {
 412                {       /* After Invert 1 */
 413                        {"GPIO0 function%d",
 414                         (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
 415                }
 416        },
 417
 418        {
 419                {       /* After Invert 2 */
 420                        {"PGLUE config_space", ATTENTION_SINGLE,
 421                         NULL, MAX_BLOCK_ID},
 422                        {"PGLUE misc_flr", ATTENTION_SINGLE,
 423                         NULL, MAX_BLOCK_ID},
 424                        {"PGLUE B RBC", ATTENTION_PAR_INT,
 425                         qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B},
 426                        {"PGLUE misc_mctp", ATTENTION_SINGLE,
 427                         NULL, MAX_BLOCK_ID},
 428                        {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
 429                        {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
 430                        {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
 431                        {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
 432                                          (1 << ATTENTION_OFFSET_SHIFT),
 433                         NULL, MAX_BLOCK_ID},
 434                        {"PCIE glue/PXP VPD %d",
 435                         (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
 436                }
 437        },
 438
 439        {
 440                {       /* After Invert 3 */
 441                        {"General Attention %d",
 442                         (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
 443                }
 444        },
 445
 446        {
 447                {       /* After Invert 4 */
 448                        {"General Attention 32", ATTENTION_SINGLE,
 449                         NULL, MAX_BLOCK_ID},
 450                        {"General Attention %d",
 451                         (2 << ATTENTION_LENGTH_SHIFT) |
 452                         (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
 453                        {"General Attention 35", ATTENTION_SINGLE,
 454                         NULL, MAX_BLOCK_ID},
 455                        {"NWS Parity",
 456                         ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
 457                         ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
 458                         NULL, BLOCK_NWS},
 459                        {"NWS Interrupt",
 460                         ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
 461                         ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
 462                         NULL, BLOCK_NWS},
 463                        {"NWM Parity",
 464                         ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
 465                         ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
 466                         NULL, BLOCK_NWM},
 467                        {"NWM Interrupt",
 468                         ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
 469                         ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
 470                         NULL, BLOCK_NWM},
 471                        {"MCP CPU", ATTENTION_SINGLE,
 472                         qed_mcp_attn_cb, MAX_BLOCK_ID},
 473                        {"MCP Watchdog timer", ATTENTION_SINGLE,
 474                         NULL, MAX_BLOCK_ID},
 475                        {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
 476                        {"AVS stop status ready", ATTENTION_SINGLE,
 477                         NULL, MAX_BLOCK_ID},
 478                        {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
 479                        {"MSTAT per-path", ATTENTION_PAR_INT,
 480                         NULL, MAX_BLOCK_ID},
 481                        {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
 482                         NULL, MAX_BLOCK_ID},
 483                        {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
 484                        {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
 485                        {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
 486                        {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
 487                        {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
 488                }
 489        },
 490
 491        {
 492                {       /* After Invert 5 */
 493                        {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
 494                        {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
 495                        {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
 496                        {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
 497                        {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
 498                        {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
 499                        {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
 500                        {"MCM",  ATTENTION_PAR_INT, NULL, BLOCK_MCM},
 501                        {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
 502                        {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
 503                        {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
 504                        {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
 505                        {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
 506                        {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
 507                        {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
 508                        {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
 509                }
 510        },
 511
 512        {
 513                {       /* After Invert 6 */
 514                        {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
 515                        {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
 516                        {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
 517                        {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
 518                        {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
 519                        {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
 520                        {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
 521                        {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
 522                        {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
 523                        {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
 524                        {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
 525                        {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
 526                        {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
 527                        {"DORQ", ATTENTION_PAR_INT,
 528                         qed_dorq_attn_cb, BLOCK_DORQ},
 529                        {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
 530                        {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
 531                }
 532        },
 533
 534        {
 535                {       /* After Invert 7 */
 536                        {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
 537                        {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
 538                        {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
 539                        {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
 540                        {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
 541                        {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
 542                        {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
 543                        {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
 544                        {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
 545                        {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
 546                        {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
 547                        {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
 548                        {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
 549                        {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
 550                        {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
 551                        {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
 552                        {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
 553                }
 554        },
 555
 556        {
 557                {       /* After Invert 8 */
 558                        {"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
 559                         NULL, BLOCK_PSWRQ2},
 560                        {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
 561                        {"PSWWR (pci_clk)", ATTENTION_PAR_INT,
 562                         NULL, BLOCK_PSWWR2},
 563                        {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
 564                        {"PSWRD (pci_clk)", ATTENTION_PAR_INT,
 565                         NULL, BLOCK_PSWRD2},
 566                        {"PSWHST", ATTENTION_PAR_INT,
 567                         qed_pswhst_attn_cb, BLOCK_PSWHST},
 568                        {"PSWHST (pci_clk)", ATTENTION_PAR_INT,
 569                         NULL, BLOCK_PSWHST2},
 570                        {"GRC", ATTENTION_PAR_INT,
 571                         qed_grc_attn_cb, BLOCK_GRC},
 572                        {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
 573                        {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
 574                        {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
 575                        {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
 576                        {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
 577                        {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
 578                        {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
 579                        {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
 580                        {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
 581                        {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
 582                         NULL, BLOCK_PGLCS},
 583                        {"PERST_B assertion", ATTENTION_SINGLE,
 584                         NULL, MAX_BLOCK_ID},
 585                        {"PERST_B deassertion", ATTENTION_SINGLE,
 586                         NULL, MAX_BLOCK_ID},
 587                        {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
 588                         NULL, MAX_BLOCK_ID},
 589                }
 590        },
 591
 592        {
 593                {       /* After Invert 9 */
 594                        {"MCP Latched memory", ATTENTION_PAR,
 595                         NULL, MAX_BLOCK_ID},
 596                        {"MCP Latched scratchpad cache", ATTENTION_SINGLE,
 597                         NULL, MAX_BLOCK_ID},
 598                        {"MCP Latched ump_tx", ATTENTION_PAR,
 599                         NULL, MAX_BLOCK_ID},
 600                        {"MCP Latched scratchpad", ATTENTION_PAR,
 601                         NULL, MAX_BLOCK_ID},
 602                        {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
 603                         NULL, MAX_BLOCK_ID},
 604                }
 605        },
 606};
 607
 608static struct aeu_invert_reg_bit *
 609qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
 610                      struct aeu_invert_reg_bit *p_bit)
 611{
 612        if (!QED_IS_BB(p_hwfn->cdev))
 613                return p_bit;
 614
 615        if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
 616                return p_bit;
 617
 618        return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
 619                                  ATTENTION_BB_SHIFT];
 620}
 621
 622static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
 623                                   struct aeu_invert_reg_bit *p_bit)
 624{
 625        return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
 626                   ATTENTION_PARITY);
 627}
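
/* Example of the BB-specific translation above: the "NWS Parity" entry in
 * aeu_descs ("After Invert 4") carries ATTENTION_BB_DIFFERENT |
 * ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0). On a BB device
 * qed_int_aeu_translate() therefore resolves it to
 * aeu_descs_special[AEU_INVERT_REG_SPECIAL_CNIG_0], i.e. "CNIG port 0",
 * which is ATTENTION_SINGLE and thus not treated as a parity source by
 * qed_int_is_parity_flag(); on non-BB devices the original "NWS Parity"
 * descriptor (ATTENTION_PAR) is used as-is.
 */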
 628
 629#define ATTN_STATE_BITS         (0xfff)
 630#define ATTN_BITS_MASKABLE      (0x3ff)
 631struct qed_sb_attn_info {
 632        /* Virtual & Physical address of the SB */
 633        struct atten_status_block       *sb_attn;
 634        dma_addr_t                      sb_phys;
 635
 636        /* Last seen running index */
 637        u16                             index;
 638
 639        /* A mask of the AEU bits resulting in a parity error */
 640        u32                             parity_mask[NUM_ATTN_REGS];
 641
 642        /* A pointer to the attention description structure */
 643        struct aeu_invert_reg           *p_aeu_desc;
 644
 645        /* Previously asserted attentions, which are still unasserted */
 646        u16                             known_attn;
 647
 648        /* Cleanup address for the link's general hw attention */
 649        u32                             mfw_attn_addr;
 650};
 651
 652static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
 653                                      struct qed_sb_attn_info *p_sb_desc)
 654{
 655        u16 rc = 0, index;
 656
  657        /* Make certain the HW write took effect */
 658        mmiowb();
 659
 660        index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
 661        if (p_sb_desc->index != index) {
 662                p_sb_desc->index        = index;
 663                rc                    = QED_SB_ATT_IDX;
 664        }
 665
 666        /* Make certain we got a consistent view with HW */
 667        mmiowb();
 668
 669        return rc;
 670}
 671
 672/**
 673 *  @brief qed_int_assertion - handles asserted attention bits
 674 *
 675 *  @param p_hwfn
 676 *  @param asserted_bits newly asserted bits
 677 *  @return int
 678 */
 679static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
 680{
 681        struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
 682        u32 igu_mask;
 683
 684        /* Mask the source of the attention in the IGU */
 685        igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
 686        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
 687                   igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
 688        igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
 689        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
 690
 691        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
 692                   "inner known ATTN state: 0x%04x --> 0x%04x\n",
 693                   sb_attn_sw->known_attn,
 694                   sb_attn_sw->known_attn | asserted_bits);
 695        sb_attn_sw->known_attn |= asserted_bits;
 696
 697        /* Handle MCP events */
 698        if (asserted_bits & 0x100) {
 699                qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
 700                /* Clean the MCP attention */
 701                qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
 702                       sb_attn_sw->mfw_attn_addr, 0);
 703        }
 704
 705        DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
 706                      GTT_BAR0_MAP_REG_IGU_CMD +
 707                      ((IGU_CMD_ATTN_BIT_SET_UPPER -
 708                        IGU_CMD_INT_ACK_BASE) << 3),
 709                      (u32)asserted_bits);
 710
 711        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
 712                   asserted_bits);
 713
 714        return 0;
 715}
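
/* Example of the masking above: if asserted_bits is 0x100 (the MFW/MCP
 * indication), it falls within ATTN_BITS_MASKABLE (0x3ff), so bit 8 of
 * IGU_REG_ATTENTION_ENABLE is cleared here and only restored by
 * qed_int_deassertion() once the matching deassertion arrives; the MCP
 * event itself is handled via qed_mcp_handle_events() and the general
 * attention is cleared at mfw_attn_addr.
 */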
 716
 717static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
 718                               enum block_id id,
 719                               enum dbg_attn_type type, bool b_clear)
 720{
 721        struct dbg_attn_block_result attn_results;
 722        enum dbg_status status;
 723
 724        memset(&attn_results, 0, sizeof(attn_results));
 725
 726        status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
 727                                   b_clear, &attn_results);
 728        if (status != DBG_STATUS_OK)
 729                DP_NOTICE(p_hwfn,
 730                          "Failed to parse attention information [status: %s]\n",
 731                          qed_dbg_get_status_str(status));
 732        else
 733                qed_dbg_parse_attn(p_hwfn, &attn_results);
 734}
 735
 736/**
 737 * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
 738 * cause of the attention
 739 *
 740 * @param p_hwfn
 741 * @param p_aeu - descriptor of an AEU bit which caused the attention
 742 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
 743 *  this bit to this group.
  744 * @param p_bit_name, bitmask - name and in-register mask of the cause bit(s)
 745 *
 746 * @return int
 747 */
 748static int
 749qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
 750                            struct aeu_invert_reg_bit *p_aeu,
 751                            u32 aeu_en_reg,
 752                            const char *p_bit_name, u32 bitmask)
 753{
 754        bool b_fatal = false;
 755        int rc = -EINVAL;
 756        u32 val;
 757
 758        DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
 759                p_bit_name, bitmask);
 760
 761        /* Call callback before clearing the interrupt status */
 762        if (p_aeu->cb) {
 763                DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
 764                        p_bit_name);
 765                rc = p_aeu->cb(p_hwfn);
 766        }
 767
 768        if (rc)
 769                b_fatal = true;
 770
 771        /* Print HW block interrupt registers */
 772        if (p_aeu->block_index != MAX_BLOCK_ID)
 773                qed_int_attn_print(p_hwfn, p_aeu->block_index,
 774                                   ATTN_TYPE_INTERRUPT, !b_fatal);
  775
 777        /* If the attention is benign, no need to prevent it */
 778        if (!rc)
 779                goto out;
 780
 781        /* Prevent this Attention from being asserted in the future */
 782        val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
 783        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
 784        DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
 785                p_bit_name);
 786
 787out:
 788        return rc;
 789}
 790
 791/**
 792 * @brief qed_int_deassertion_parity - handle a single parity AEU source
 793 *
 794 * @param p_hwfn
 795 * @param p_aeu - descriptor of an AEU bit which caused the parity
 796 * @param aeu_en_reg - address of the AEU enable register
 797 * @param bit_index
 798 */
 799static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
 800                                       struct aeu_invert_reg_bit *p_aeu,
 801                                       u32 aeu_en_reg, u8 bit_index)
 802{
 803        u32 block_id = p_aeu->block_index, mask, val;
 804
 805        DP_NOTICE(p_hwfn->cdev,
 806                  "%s parity attention is set [address 0x%08x, bit %d]\n",
 807                  p_aeu->bit_name, aeu_en_reg, bit_index);
 808
 809        if (block_id != MAX_BLOCK_ID) {
 810                qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
 811
 812                /* In BB, there's a single parity bit for several blocks */
 813                if (block_id == BLOCK_BTB) {
 814                        qed_int_attn_print(p_hwfn, BLOCK_OPTE,
 815                                           ATTN_TYPE_PARITY, false);
 816                        qed_int_attn_print(p_hwfn, BLOCK_MCP,
 817                                           ATTN_TYPE_PARITY, false);
 818                }
 819        }
 820
 821        /* Prevent this parity error from being re-asserted */
 822        mask = ~BIT(bit_index);
 823        val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
 824        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
 825        DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
 826                p_aeu->bit_name);
 827}
 828
 829/**
 830 * @brief - handles deassertion of previously asserted attentions.
 831 *
 832 * @param p_hwfn
 833 * @param deasserted_bits - newly deasserted bits
 834 * @return int
 835 *
 836 */
 837static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
 838                               u16 deasserted_bits)
 839{
 840        struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
 841        u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
 842        u8 i, j, k, bit_idx;
 843        int rc = 0;
 844
 845        /* Read the attention registers in the AEU */
 846        for (i = 0; i < NUM_ATTN_REGS; i++) {
 847                aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
 848                                        MISC_REG_AEU_AFTER_INVERT_1_IGU +
 849                                        i * 0x4);
 850                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
 851                           "Deasserted bits [%d]: %08x\n",
 852                           i, aeu_inv_arr[i]);
 853        }
 854
 855        /* Find parity attentions first */
 856        for (i = 0; i < NUM_ATTN_REGS; i++) {
 857                struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
 858                u32 parities;
 859
 860                aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
 861                en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
 862
 863                /* Skip register in which no parity bit is currently set */
 864                parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
 865                if (!parities)
 866                        continue;
 867
 868                for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
 869                        struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
 870
 871                        if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
 872                            !!(parities & BIT(bit_idx)))
 873                                qed_int_deassertion_parity(p_hwfn, p_bit,
 874                                                           aeu_en, bit_idx);
 875
 876                        bit_idx += ATTENTION_LENGTH(p_bit->flags);
 877                }
 878        }
 879
 880        /* Find non-parity cause for attention and act */
 881        for (k = 0; k < MAX_ATTN_GRPS; k++) {
 882                struct aeu_invert_reg_bit *p_aeu;
 883
 884                /* Handle only groups whose attention is currently deasserted */
 885                if (!(deasserted_bits & (1 << k)))
 886                        continue;
 887
 888                for (i = 0; i < NUM_ATTN_REGS; i++) {
 889                        u32 bits;
 890
 891                        aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
 892                                 i * sizeof(u32) +
 893                                 k * sizeof(u32) * NUM_ATTN_REGS;
 894
 895                        en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
 896                        bits = aeu_inv_arr[i] & en;
 897
 898                        /* Skip if no bit from this group is currently set */
 899                        if (!bits)
 900                                continue;
 901
 902                        /* Find all set bits from current register which belong
 903                         * to current group, making them responsible for the
 904                         * previous assertion.
 905                         */
 906                        for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
  907                                unsigned long bitmask;
 908                                u8 bit, bit_len;
 909
 910                                p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
 911                                p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);
 912
 913                                bit = bit_idx;
 914                                bit_len = ATTENTION_LENGTH(p_aeu->flags);
 915                                if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
 916                                        /* Skip Parity */
 917                                        bit++;
 918                                        bit_len--;
 919                                }
 920
 921                                bitmask = bits & (((1 << bit_len) - 1) << bit);
 922                                bitmask >>= bit;
 923
 924                                if (bitmask) {
 925                                        u32 flags = p_aeu->flags;
 926                                        char bit_name[30];
 927                                        u8 num;
 928
 929                                        num = (u8)find_first_bit(&bitmask,
 930                                                                 bit_len);
 931
  932                                        /* Some bits represent more than
  933                                         * a single interrupt. Correctly print
 934                                         * their name.
 935                                         */
 936                                        if (ATTENTION_LENGTH(flags) > 2 ||
 937                                            ((flags & ATTENTION_PAR_INT) &&
 938                                             ATTENTION_LENGTH(flags) > 1))
 939                                                snprintf(bit_name, 30,
 940                                                         p_aeu->bit_name, num);
 941                                        else
 942                                                strncpy(bit_name,
 943                                                        p_aeu->bit_name, 30);
 944
 945                                        /* We now need to pass bitmask in its
 946                                         * correct position.
 947                                         */
 948                                        bitmask <<= bit;
 949
 950                                        /* Handle source of the attention */
 951                                        qed_int_deassertion_aeu_bit(p_hwfn,
 952                                                                    p_aeu,
 953                                                                    aeu_en,
 954                                                                    bit_name,
 955                                                                    bitmask);
 956                                }
 957
 958                                bit_idx += ATTENTION_LENGTH(p_aeu->flags);
 959                        }
 960                }
 961        }
 962
 963        /* Clear IGU indication for the deasserted bits */
 964        DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
 965                                    GTT_BAR0_MAP_REG_IGU_CMD +
 966                                    ((IGU_CMD_ATTN_BIT_CLR_UPPER -
 967                                      IGU_CMD_INT_ACK_BASE) << 3),
 968                                    ~((u32)deasserted_bits));
 969
 970        /* Unmask deasserted attentions in IGU */
 971        aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
 972        aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
 973        qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
 974
 975        /* Clear deassertion from inner state */
 976        sb_attn_sw->known_attn &= ~deasserted_bits;
 977
 978        return rc;
 979}
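
/* Worked example of the non-parity bit walk above, using the
 * "After Invert 2" register: descriptors are visited in order and bit_idx
 * advances by ATTENTION_LENGTH(), so the single-bit PGLUE/Flash/SMB/Main
 * Power sources plus the two-bit "PGLUE B RBC" entry cover bits 0-7,
 * "SW timers #%d" (length 8) covers bits 8-15 and "PCIE glue/PXP VPD %d"
 * (length 16) covers bits 16-31. A set bit 10 therefore lands in the
 * SW-timers entry: bitmask becomes 0x4 after the shift, find_first_bit()
 * yields num = 2, and the event is logged as "SW timers #2" before
 * qed_int_deassertion_aeu_bit() is called.
 */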
 980
 981static int qed_int_attentions(struct qed_hwfn *p_hwfn)
 982{
 983        struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
 984        struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
 985        u32 attn_bits = 0, attn_acks = 0;
 986        u16 asserted_bits, deasserted_bits;
 987        __le16 index;
 988        int rc = 0;
 989
 990        /* Read current attention bits/acks - safeguard against attentions
  991         * by guaranteeing work on a synchronized timeframe
 992         */
 993        do {
 994                index = p_sb_attn->sb_index;
 995                attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
 996                attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
 997        } while (index != p_sb_attn->sb_index);
 998        p_sb_attn->sb_index = index;
 999
 1000        /* Assertion / deassertion are meaningful (and in a correct state)
 1001         * only when they differ and are consistent with the known state -
 1002         * deassertion when there was a previous attention and a current ack,
 1003         * and assertion when there is a current attention with no previous one.
 1004         */
1005        asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1006                ~p_sb_attn_sw->known_attn;
1007        deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1008                p_sb_attn_sw->known_attn;
1009
1010        if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
1011                DP_INFO(p_hwfn,
1012                        "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1013                        index, attn_bits, attn_acks, asserted_bits,
1014                        deasserted_bits, p_sb_attn_sw->known_attn);
1015        } else if (asserted_bits == 0x100) {
1016                DP_INFO(p_hwfn, "MFW indication via attention\n");
1017        } else {
1018                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1019                           "MFW indication [deassertion]\n");
1020        }
1021
1022        if (asserted_bits) {
1023                rc = qed_int_assertion(p_hwfn, asserted_bits);
1024                if (rc)
1025                        return rc;
1026        }
1027
1028        if (deasserted_bits)
1029                rc = qed_int_deassertion(p_hwfn, deasserted_bits);
1030
1031        return rc;
1032}
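
/* Worked example of the assertion/deassertion math above: with
 * known_attn = 0, attn_bits = 0x100 and attn_acks = 0, the newly asserted
 * set is (0x100 & ~0x0 & 0xfff) & ~0x0 = 0x100 (an MFW indication) and
 * nothing is deasserted; known_attn then becomes 0x100. Once the bit is
 * acked and drops from attn_bits (attn_bits = 0, attn_acks = 0x100), the
 * deasserted set is (~0x0 & 0x100 & 0xfff) & 0x100 = 0x100 and known_attn
 * is cleared again.
 */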
1033
1034static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
1035                            void __iomem *igu_addr, u32 ack_cons)
1036{
1037        struct igu_prod_cons_update igu_ack = { 0 };
1038
1039        igu_ack.sb_id_and_flags =
1040                ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1041                 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1042                 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1043                 (IGU_SEG_ACCESS_ATTN <<
1044                  IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1045
1046        DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
1047
 1048        /* Both segments (interrupts & acks) are written to the same address;
 1049         * need to guarantee all commands are received (in order) by the HW.
 1050         */
1051        mmiowb();
1052        barrier();
1053}
1054
1055void qed_int_sp_dpc(unsigned long hwfn_cookie)
1056{
1057        struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
1058        struct qed_pi_info *pi_info = NULL;
1059        struct qed_sb_attn_info *sb_attn;
1060        struct qed_sb_info *sb_info;
1061        int arr_size;
1062        u16 rc = 0;
1063
1064        if (!p_hwfn->p_sp_sb) {
1065                DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
1066                return;
1067        }
1068
1069        sb_info = &p_hwfn->p_sp_sb->sb_info;
1070        arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1071        if (!sb_info) {
1072                DP_ERR(p_hwfn->cdev,
1073                       "Status block is NULL - cannot ack interrupts\n");
1074                return;
1075        }
1076
1077        if (!p_hwfn->p_sb_attn) {
 1078                DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn\n");
1079                return;
1080        }
1081        sb_attn = p_hwfn->p_sb_attn;
1082
1083        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1084                   p_hwfn, p_hwfn->my_id);
1085
 1086        /* Disable ack for the default status block. Required both for MSI-X
 1087         * and for INTA in non-mask mode; in INTA it does no harm.
 1088         */
1089        qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1090
1091        /* Gather Interrupts/Attentions information */
1092        if (!sb_info->sb_virt) {
1093                DP_ERR(p_hwfn->cdev,
1094                       "Interrupt Status block is NULL - cannot check for new interrupts!\n");
1095        } else {
1096                u32 tmp_index = sb_info->sb_ack;
1097
1098                rc = qed_sb_update_sb_idx(sb_info);
1099                DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
1100                           "Interrupt indices: 0x%08x --> 0x%08x\n",
1101                           tmp_index, sb_info->sb_ack);
1102        }
1103
1104        if (!sb_attn || !sb_attn->sb_attn) {
1105                DP_ERR(p_hwfn->cdev,
1106                       "Attentions Status block is NULL - cannot check for new attentions!\n");
1107        } else {
1108                u16 tmp_index = sb_attn->index;
1109
1110                rc |= qed_attn_update_idx(p_hwfn, sb_attn);
1111                DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
1112                           "Attention indices: 0x%08x --> 0x%08x\n",
1113                           tmp_index, sb_attn->index);
1114        }
1115
 1116        /* Check if we expect interrupts at this time; if not, just ack them */
1117        if (!(rc & QED_SB_EVENT_MASK)) {
1118                qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1119                return;
1120        }
1121
 1122        /* Check the validity of the DPC ptt. If invalid, ack interrupts and return */
1123        if (!p_hwfn->p_dpc_ptt) {
1124                DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
1125                qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1126                return;
1127        }
1128
1129        if (rc & QED_SB_ATT_IDX)
1130                qed_int_attentions(p_hwfn);
1131
1132        if (rc & QED_SB_IDX) {
1133                int pi;
1134
 1135                /* Invoke the completion callback of every registered PI */
1136                for (pi = 0; pi < arr_size; pi++) {
1137                        pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1138                        if (pi_info->comp_cb)
1139                                pi_info->comp_cb(p_hwfn, pi_info->cookie);
1140                }
1141        }
1142
1143        if (sb_attn && (rc & QED_SB_ATT_IDX))
1144                /* This should be done before the interrupts are enabled,
1145                 * since otherwise a new attention will be generated.
1146                 */
1147                qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1148
1149        qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1150}
1151
1152static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
1153{
1154        struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1155
1156        if (!p_sb)
1157                return;
1158
1159        if (p_sb->sb_attn)
1160                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1161                                  SB_ATTN_ALIGNED_SIZE(p_hwfn),
1162                                  p_sb->sb_attn, p_sb->sb_phys);
1163        kfree(p_sb);
1164        p_hwfn->p_sb_attn = NULL;
1165}
1166
1167static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
1168                                  struct qed_ptt *p_ptt)
1169{
1170        struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1171
1172        memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1173
1174        sb_info->index = 0;
1175        sb_info->known_attn = 0;
1176
1177        /* Configure Attention Status Block in IGU */
1178        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1179               lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
1180        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1181               upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
1182}
1183
1184static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
1185                                 struct qed_ptt *p_ptt,
1186                                 void *sb_virt_addr, dma_addr_t sb_phy_addr)
1187{
1188        struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1189        int i, j, k;
1190
1191        sb_info->sb_attn = sb_virt_addr;
1192        sb_info->sb_phys = sb_phy_addr;
1193
1194        /* Set the pointer to the AEU descriptors */
1195        sb_info->p_aeu_desc = aeu_descs;
1196
1197        /* Calculate Parity Masks */
1198        memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1199        for (i = 0; i < NUM_ATTN_REGS; i++) {
1200                /* j is array index, k is bit index */
1201                for (j = 0, k = 0; k < 32; j++) {
1202                        struct aeu_invert_reg_bit *p_aeu;
1203
1204                        p_aeu = &aeu_descs[i].bits[j];
1205                        if (qed_int_is_parity_flag(p_hwfn, p_aeu))
1206                                sb_info->parity_mask[i] |= 1 << k;
1207
1208                        k += ATTENTION_LENGTH(p_aeu->flags);
1209                }
1210                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1211                           "Attn Mask [Reg %d]: 0x%08x\n",
1212                           i, sb_info->parity_mask[i]);
1213        }
1214
1215        /* Set the address of cleanup for the mcp attention */
1216        sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1217                                 MISC_REG_AEU_GENERAL_ATTN_0;
1218
1219        qed_int_sb_attn_setup(p_hwfn, p_ptt);
1220}
1221
1222static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
1223                                 struct qed_ptt *p_ptt)
1224{
1225        struct qed_dev *cdev = p_hwfn->cdev;
1226        struct qed_sb_attn_info *p_sb;
1227        dma_addr_t p_phys = 0;
1228        void *p_virt;
1229
1230        /* SB struct */
1231        p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
1232        if (!p_sb)
1233                return -ENOMEM;
1234
1235        /* SB ring  */
1236        p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1237                                    SB_ATTN_ALIGNED_SIZE(p_hwfn),
1238                                    &p_phys, GFP_KERNEL);
1239
1240        if (!p_virt) {
1241                kfree(p_sb);
1242                return -ENOMEM;
1243        }
1244
1245        /* Attention setup */
1246        p_hwfn->p_sb_attn = p_sb;
1247        qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1248
1249        return 0;
1250}
1251
1252/* coalescing timeout = timeset << (timer_res + 1) */
1253#define QED_CAU_DEF_RX_USECS 24
1254#define QED_CAU_DEF_TX_USECS 48
1255
1256void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
1257                           struct cau_sb_entry *p_sb_entry,
1258                           u8 pf_id, u16 vf_number, u8 vf_valid)
1259{
1260        struct qed_dev *cdev = p_hwfn->cdev;
1261        u32 cau_state;
1262        u8 timer_res;
1263
1264        memset(p_sb_entry, 0, sizeof(*p_sb_entry));
1265
1266        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1267        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1268        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1269        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1270        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1271
1272        cau_state = CAU_HC_DISABLE_STATE;
1273
1274        if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
1275                cau_state = CAU_HC_ENABLE_STATE;
1276                if (!cdev->rx_coalesce_usecs)
1277                        cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
1278                if (!cdev->tx_coalesce_usecs)
1279                        cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
1280        }
1281
1282        /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
1283        if (cdev->rx_coalesce_usecs <= 0x7F)
1284                timer_res = 0;
1285        else if (cdev->rx_coalesce_usecs <= 0xFF)
1286                timer_res = 1;
1287        else
1288                timer_res = 2;
1289        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1290
1291        if (cdev->tx_coalesce_usecs <= 0x7F)
1292                timer_res = 0;
1293        else if (cdev->tx_coalesce_usecs <= 0xFF)
1294                timer_res = 1;
1295        else
1296                timer_res = 2;
1297        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1298
1299        SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1300        SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1301}
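
/* Editor's note: illustrative sketch, not part of the driver. The timer
 * resolution chosen above keeps the 7-bit timeset in range for the requested
 * coalescing value; a minimal helper capturing the same 0x7F/0xFF thresholds:
 */
static u8 example_pick_timer_res(u32 coalesce_usecs)
{
        if (coalesce_usecs <= 0x7F)
                return 0;       /* value already fits in 7 bits */
        else if (coalesce_usecs <= 0xFF)
                return 1;       /* shift right by one to fit */
        else
                return 2;       /* shift right by two to fit */
}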
1302
1303static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
1304                                struct qed_ptt *p_ptt,
1305                                u16 igu_sb_id,
1306                                u32 pi_index,
1307                                enum qed_coalescing_fsm coalescing_fsm,
1308                                u8 timeset)
1309{
1310        struct cau_pi_entry pi_entry;
1311        u32 sb_offset, pi_offset;
1312
1313        if (IS_VF(p_hwfn->cdev))
1314                return;
1315
1316        sb_offset = igu_sb_id * PIS_PER_SB;
1317        memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
1318
1319        SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1320        if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
1321                SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1322        else
1323                SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1324
1325        pi_offset = sb_offset + pi_index;
1326        if (p_hwfn->hw_init_done) {
1327                qed_wr(p_hwfn, p_ptt,
1328                       CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1329                       *((u32 *)&(pi_entry)));
1330        } else {
1331                STORE_RT_REG(p_hwfn,
1332                             CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1333                             *((u32 *)&(pi_entry)));
1334        }
1335}
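
/* Editor's note: illustrative sketch, not part of the driver. Each status
 * block owns PIS_PER_SB consecutive PI entries in the CAU PI memory, so the
 * flat PI index used above is sb_offset + pi_index; the pis_per_sb parameter
 * below is a hypothetical stand-in for that constant:
 */
static u32 example_flat_pi_index(u16 igu_sb_id, u32 pi_index, u32 pis_per_sb)
{
        return (u32)igu_sb_id * pis_per_sb + pi_index;
}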
1336
1337void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
1338                         struct qed_ptt *p_ptt,
1339                         dma_addr_t sb_phys,
1340                         u16 igu_sb_id, u16 vf_number, u8 vf_valid)
1341{
1342        struct cau_sb_entry sb_entry;
1343
1344        qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1345                              vf_number, vf_valid);
1346
1347        if (p_hwfn->hw_init_done) {
1348                /* Wide-bus, initialize via DMAE */
1349                u64 phys_addr = (u64)sb_phys;
1350
1351                qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
1352                                  CAU_REG_SB_ADDR_MEMORY +
1353                                  igu_sb_id * sizeof(u64), 2, 0);
1354                qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
1355                                  CAU_REG_SB_VAR_MEMORY +
1356                                  igu_sb_id * sizeof(u64), 2, 0);
1357        } else {
1358                /* Initialize Status Block Address */
1359                STORE_RT_REG_AGG(p_hwfn,
1360                                 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
1361                                 igu_sb_id * 2,
1362                                 sb_phys);
1363
1364                STORE_RT_REG_AGG(p_hwfn,
1365                                 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
1366                                 igu_sb_id * 2,
1367                                 sb_entry);
1368        }
1369
1370        /* Configure pi coalescing if set */
1371        if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
1372                u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1373                u8 timeset, timer_res;
1374                u8 i;
1375
1376                /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
1377                if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
1378                        timer_res = 0;
1379                else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
1380                        timer_res = 1;
1381                else
1382                        timer_res = 2;
1383                timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
1384                qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1385                                    QED_COAL_RX_STATE_MACHINE, timeset);
1386
1387                if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
1388                        timer_res = 0;
1389                else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
1390                        timer_res = 1;
1391                else
1392                        timer_res = 2;
1393                timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
1394                for (i = 0; i < num_tc; i++) {
1395                        qed_int_cau_conf_pi(p_hwfn, p_ptt,
1396                                            igu_sb_id, TX_PI(i),
1397                                            QED_COAL_TX_STATE_MACHINE,
1398                                            timeset);
1399                }
1400        }
1401}
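
/* Editor's note: illustrative sketch, not part of the driver. The timeset
 * programmed per PI is the requested coalescing value divided down by the
 * chosen timer resolution, reusing the example_pick_timer_res() sketch above;
 * e.g. the 24us Rx default gives res 0 and timeset 24, while a 300us request
 * would give res 2 and timeset 300 >> 2 = 75:
 */
static u8 example_timeset(u32 coalesce_usecs)
{
        u8 timer_res = example_pick_timer_res(coalesce_usecs);

        return (u8)(coalesce_usecs >> timer_res);
}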
1402
1403void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
1404                      struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
1405{
1406        /* zero status block and ack counter */
1407        sb_info->sb_ack = 0;
1408        memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1409
1410        if (IS_PF(p_hwfn->cdev))
1411                qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1412                                    sb_info->igu_sb_id, 0, 0);
1413}
1414
1415struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
1416{
1417        struct qed_igu_block *p_block;
1418        u16 igu_id;
1419
1420        for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
1421             igu_id++) {
1422                p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1423
1424                if (!(p_block->status & QED_IGU_STATUS_VALID) ||
1425                    !(p_block->status & QED_IGU_STATUS_FREE))
1426                        continue;
1427
1428                if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
1429                        return p_block;
1430        }
1431
1432        return NULL;
1433}
1434
1435static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
1436{
1437        struct qed_igu_block *p_block;
1438        u16 igu_id;
1439
1440        for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
1441             igu_id++) {
1442                p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1443
1444                if (!(p_block->status & QED_IGU_STATUS_VALID) ||
1445                    !p_block->is_pf ||
1446                    p_block->vector_number != vector_id)
1447                        continue;
1448
1449                return igu_id;
1450        }
1451
1452        return QED_SB_INVALID_IDX;
1453}
1454
1455u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
1456{
1457        u16 igu_sb_id;
1458
1459        /* Assuming a contiguous set of IGU SBs dedicated to the given PF */
1460        if (sb_id == QED_SP_SB_ID)
1461                igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1462        else if (IS_PF(p_hwfn->cdev))
1463                igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1464        else
1465                igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
1466
1467        if (sb_id == QED_SP_SB_ID)
1468                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1469                           "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1470        else
1471                DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1472                           "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1473
1474        return igu_sb_id;
1475}
1476
1477int qed_int_sb_init(struct qed_hwfn *p_hwfn,
1478                    struct qed_ptt *p_ptt,
1479                    struct qed_sb_info *sb_info,
1480                    void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
1481{
1482        sb_info->sb_virt = sb_virt_addr;
1483        sb_info->sb_phys = sb_phy_addr;
1484
1485        sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
1486
1487        if (sb_id != QED_SP_SB_ID) {
1488                if (IS_PF(p_hwfn->cdev)) {
1489                        struct qed_igu_info *p_info;
1490                        struct qed_igu_block *p_block;
1491
1492                        p_info = p_hwfn->hw_info.p_igu_info;
1493                        p_block = &p_info->entry[sb_info->igu_sb_id];
1494
1495                        p_block->sb_info = sb_info;
1496                        p_block->status &= ~QED_IGU_STATUS_FREE;
1497                        p_info->usage.free_cnt--;
1498                } else {
1499                        qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
1500                }
1501        }
1502
1503        sb_info->cdev = p_hwfn->cdev;
1504
1505        /* The igu address will hold the absolute address that needs to be
1506         * written to for a specific status block
1507         */
1508        if (IS_PF(p_hwfn->cdev)) {
1509                sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
1510                                                  GTT_BAR0_MAP_REG_IGU_CMD +
1511                                                  (sb_info->igu_sb_id << 3);
1512        } else {
1513                sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
1514                                                  PXP_VF_BAR0_START_IGU +
1515                                                  ((IGU_CMD_INT_ACK_BASE +
1516                                                    sb_info->igu_sb_id) << 3);
1517        }
1518
1519        sb_info->flags |= QED_SB_INFO_INIT;
1520
1521        qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
1522
1523        return 0;
1524}
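
/* Editor's note: illustrative sketch, not part of the driver. A fastpath
 * client would typically pair a DMA-coherent allocation with
 * qed_int_sb_init(), much like the slowpath SB allocation further below;
 * the sb_size and sb_id parameters here are placeholders:
 */
static int example_client_sb_setup(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_sb_info *sb_info,
                                   size_t sb_size, u16 sb_id)
{
        dma_addr_t phys;
        void *virt;

        virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sb_size,
                                  &phys, GFP_KERNEL);
        if (!virt)
                return -ENOMEM;

        return qed_int_sb_init(p_hwfn, p_ptt, sb_info, virt, phys, sb_id);
}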
1525
1526int qed_int_sb_release(struct qed_hwfn *p_hwfn,
1527                       struct qed_sb_info *sb_info, u16 sb_id)
1528{
1529        struct qed_igu_block *p_block;
1530        struct qed_igu_info *p_info;
1531
1532        if (!sb_info)
1533                return 0;
1534
1535        /* zero status block and ack counter */
1536        sb_info->sb_ack = 0;
1537        memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1538
1539        if (IS_VF(p_hwfn->cdev)) {
1540                qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
1541                return 0;
1542        }
1543
1544        p_info = p_hwfn->hw_info.p_igu_info;
1545        p_block = &p_info->entry[sb_info->igu_sb_id];
1546
1547        /* Vector 0 is reserved to Default SB */
1548        if (!p_block->vector_number) {
1549                DP_ERR(p_hwfn, "Do not free the slowpath SB using this function\n");
1550                return -EINVAL;
1551        }
1552
1553        /* Lose reference to client's SB info, and fix counters */
1554        p_block->sb_info = NULL;
1555        p_block->status |= QED_IGU_STATUS_FREE;
1556        p_info->usage.free_cnt++;
1557
1558        return 0;
1559}
1560
1561static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
1562{
1563        struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1564
1565        if (!p_sb)
1566                return;
1567
1568        if (p_sb->sb_info.sb_virt)
1569                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1570                                  SB_ALIGNED_SIZE(p_hwfn),
1571                                  p_sb->sb_info.sb_virt,
1572                                  p_sb->sb_info.sb_phys);
1573        kfree(p_sb);
1574        p_hwfn->p_sp_sb = NULL;
1575}
1576
1577static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1578{
1579        struct qed_sb_sp_info *p_sb;
1580        dma_addr_t p_phys = 0;
1581        void *p_virt;
1582
1583        /* SB struct */
1584        p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
1585        if (!p_sb)
1586                return -ENOMEM;
1587
1588        /* SB ring  */
1589        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1590                                    SB_ALIGNED_SIZE(p_hwfn),
1591                                    &p_phys, GFP_KERNEL);
1592        if (!p_virt) {
1593                kfree(p_sb);
1594                return -ENOMEM;
1595        }
1596
1597        /* Status Block setup */
1598        p_hwfn->p_sp_sb = p_sb;
1599        qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
1600                        p_phys, QED_SP_SB_ID);
1601
1602        memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1603
1604        return 0;
1605}
1606
1607int qed_int_register_cb(struct qed_hwfn *p_hwfn,
1608                        qed_int_comp_cb_t comp_cb,
1609                        void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
1610{
1611        struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1612        int rc = -ENOMEM;
1613        u8 pi;
1614
1615        /* Look for a free index */
1616        for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1617                if (p_sp_sb->pi_info_arr[pi].comp_cb)
1618                        continue;
1619
1620                p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1621                p_sp_sb->pi_info_arr[pi].cookie = cookie;
1622                *sb_idx = pi;
1623                *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1624                rc = 0;
1625                break;
1626        }
1627
1628        return rc;
1629}
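
/* Editor's note: illustrative sketch, not part of the driver. A slowpath
 * protocol would register its completion handler roughly as below; the
 * callback and cookie are hypothetical, and a real caller would keep the
 * returned sb_idx and firmware consumer pointer rather than discard them:
 */
static int example_register_proto_cb(struct qed_hwfn *p_hwfn,
                                     qed_int_comp_cb_t example_comp_cb,
                                     void *example_cookie)
{
        __le16 *p_fw_cons;
        u8 sb_idx;

        return qed_int_register_cb(p_hwfn, example_comp_cb, example_cookie,
                                   &sb_idx, &p_fw_cons);
}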
1630
1631int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
1632{
1633        struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1634
1635        if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
1636                return -ENOMEM;
1637
1638        p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
1639        p_sp_sb->pi_info_arr[pi].cookie = NULL;
1640
1641        return 0;
1642}
1643
1644u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
1645{
1646        return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1647}
1648
1649void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
1650                            struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
1651{
1652        u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1653
1654        p_hwfn->cdev->int_mode = int_mode;
1655        switch (p_hwfn->cdev->int_mode) {
1656        case QED_INT_MODE_INTA:
1657                igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1658                igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1659                break;
1660
1661        case QED_INT_MODE_MSI:
1662                igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1663                igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1664                break;
1665
1666        case QED_INT_MODE_MSIX:
1667                igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1668                break;
1669        case QED_INT_MODE_POLL:
1670                break;
1671        }
1672
1673        qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1674}
1675
1676static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
1677                                    struct qed_ptt *p_ptt)
1678{
1679
1680        /* Configure AEU signal change to produce attentions */
1681        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1682        qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1683        qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1684        qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1685
1686        /* Flush the writes to IGU */
1687        mmiowb();
1688
1689        /* Unmask AEU signals toward IGU */
1690        qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1691}
1692
1693int
1694qed_int_igu_enable(struct qed_hwfn *p_hwfn,
1695                   struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
1696{
1697        int rc = 0;
1698
1699        qed_int_igu_enable_attn(p_hwfn, p_ptt);
1700
1701        if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1702                rc = qed_slowpath_irq_req(p_hwfn);
1703                if (rc) {
1704                        DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
1705                        return -EINVAL;
1706                }
1707                p_hwfn->b_int_requested = true;
1708        }
1709        /* Enable interrupt Generation */
1710        qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1711        p_hwfn->b_int_enabled = 1;
1712
1713        return rc;
1714}
1715
1716void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1717{
1718        p_hwfn->b_int_enabled = 0;
1719
1720        if (IS_VF(p_hwfn->cdev))
1721                return;
1722
1723        qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1724}
1725
1726#define IGU_CLEANUP_SLEEP_LENGTH                (1000)
1727static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
1728                                   struct qed_ptt *p_ptt,
1729                                   u16 igu_sb_id,
1730                                   bool cleanup_set, u16 opaque_fid)
1731{
1732        u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1733        u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
1734        u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1735
1736        /* Set the data field */
1737        SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1738        SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
1739        SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1740
1741        /* Set the control register */
1742        SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1743        SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1744        SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
1745
1746        qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
1747
1748        barrier();
1749
1750        qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
1751
1752        /* Flush the write to IGU */
1753        mmiowb();
1754
1755        /* calculate where to read the status bit from */
1756        sb_bit = 1 << (igu_sb_id % 32);
1757        sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
1758
1759        sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
1760
1761        /* Now wait for the command to complete */
1762        do {
1763                val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
1764
1765                if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
1766                        break;
1767
1768                usleep_range(5000, 10000);
1769        } while (--sleep_cnt);
1770
1771        if (!sleep_cnt)
1772                DP_NOTICE(p_hwfn,
1773                          "Timeout waiting for clear status 0x%08x [for sb %d]\n",
1774                          val, igu_sb_id);
1775}
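
/* Editor's note: illustrative sketch, not part of the driver. The cleanup
 * status of SB n is polled above at bit (n % 32) of the (n / 32)-th 32-bit
 * word starting at IGU_REG_CLEANUP_STATUS_0; a stand-alone model of that
 * bit/word mapping:
 */
static void example_cleanup_status_location(u16 igu_sb_id,
                                            u32 *bit_mask, u32 *word_byte_off)
{
        *bit_mask = 1U << (igu_sb_id % 32);              /* bit within the word */
        *word_byte_off = (igu_sb_id / 32) * sizeof(u32); /* word offset in bytes */
}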
1776
1777void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
1778                                     struct qed_ptt *p_ptt,
1779                                     u16 igu_sb_id, u16 opaque, bool b_set)
1780{
1781        struct qed_igu_block *p_block;
1782        int pi, i;
1783
1784        p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
1785        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1786                   "Cleaning SB [%04x]: func_id = %d is_pf = %d vector_num = 0x%x\n",
1787                   igu_sb_id,
1788                   p_block->function_id,
1789                   p_block->is_pf, p_block->vector_number);
1790
1791        /* Set */
1792        if (b_set)
1793                qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
1794
1795        /* Clear */
1796        qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
1797
1798        /* Wait for the IGU SB to cleanup */
1799        for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
1800                u32 val;
1801
1802                val = qed_rd(p_hwfn, p_ptt,
1803                             IGU_REG_WRITE_DONE_PENDING +
1804                             ((igu_sb_id / 32) * 4));
1805                if (val & BIT((igu_sb_id % 32)))
1806                        usleep_range(10, 20);
1807                else
1808                        break;
1809        }
1810        if (i == IGU_CLEANUP_SLEEP_LENGTH)
1811                DP_NOTICE(p_hwfn,
1812                          "Failed cleanup: SB [0x%08x] still appears in WRITE_DONE_PENDING\n",
1813                          igu_sb_id);
1814
1815        /* Clear the CAU for the SB */
1816        for (pi = 0; pi < 12; pi++)
1817                qed_wr(p_hwfn, p_ptt,
1818                       CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
1819}
1820
1821void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
1822                              struct qed_ptt *p_ptt,
1823                              bool b_set, bool b_slowpath)
1824{
1825        struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1826        struct qed_igu_block *p_block;
1827        u16 igu_sb_id = 0;
1828        u32 val = 0;
1829
1830        val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
1831        val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
1832        val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
1833        qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
1834
1835        for (igu_sb_id = 0;
1836             igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
1837                p_block = &p_info->entry[igu_sb_id];
1838
1839                if (!(p_block->status & QED_IGU_STATUS_VALID) ||
1840                    !p_block->is_pf ||
1841                    (p_block->status & QED_IGU_STATUS_DSB))
1842                        continue;
1843
1844                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
1845                                                p_hwfn->hw_info.opaque_fid,
1846                                                b_set);
1847        }
1848
1849        if (b_slowpath)
1850                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
1851                                                p_info->igu_dsb_id,
1852                                                p_hwfn->hw_info.opaque_fid,
1853                                                b_set);
1854}
1855
1856int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1857{
1858        struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1859        struct qed_igu_block *p_block;
1860        int pf_sbs, vf_sbs;
1861        u16 igu_sb_id;
1862        u32 val, rval;
1863
1864        if (!RESC_NUM(p_hwfn, QED_SB)) {
1865                p_info->b_allow_pf_vf_change = false;
1866        } else {
1867                /* Use the numbers the MFW has provided -
1868                 * don't forget the MFW accounts for the default SB as well.
1869                 */
1870                p_info->b_allow_pf_vf_change = true;
1871
1872                if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
1873                        DP_INFO(p_hwfn,
1874                                "MFW reports 0x%04x PF SBs; IGU indicates only 0x%04x\n",
1875                                RESC_NUM(p_hwfn, QED_SB) - 1,
1876                                p_info->usage.cnt);
1877                        p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
1878                }
1879
1880                if (IS_PF_SRIOV(p_hwfn)) {
1881                        u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;
1882
1883                        if (vfs != p_info->usage.iov_cnt)
1884                                DP_VERBOSE(p_hwfn,
1885                                           NETIF_MSG_INTR,
1886                                           "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
1887                                           p_info->usage.iov_cnt, vfs);
1888
1889                        /* At this point we know the total number of SBs in
1890                         * the IGU and the number of PF SBs, so we can
1891                         * validate that enough remain for the VFs.
1892                         */
1893                        if (vfs > p_info->usage.free_cnt +
1894                            p_info->usage.free_cnt_iov - p_info->usage.cnt) {
1895                                DP_NOTICE(p_hwfn,
1896                                          "Not enough SBs for VFs - 0x%04x SBs available, of which 0x%04x are PF SBs while 0x%04x are required for VFs\n",
1897                                          p_info->usage.free_cnt +
1898                                          p_info->usage.free_cnt_iov,
1899                                          p_info->usage.cnt, vfs);
1900                                return -EINVAL;
1901                        }
1902
1903                        /* Currently cap the number of VF SBs at the
1904                         * number of VFs.
1905                         */
1906                        p_info->usage.iov_cnt = vfs;
1907                }
1908        }
1909
1910        /* Mark all SBs as free, now in the right PF/VFs division */
1911        p_info->usage.free_cnt = p_info->usage.cnt;
1912        p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
1913        p_info->usage.orig = p_info->usage.cnt;
1914        p_info->usage.iov_orig = p_info->usage.iov_cnt;
1915
1916        /* We now proceed to re-configure the IGU CAM to reflect the initial
1917         * configuration. We can start with the default SB.
1918         */
1919        pf_sbs = p_info->usage.cnt;
1920        vf_sbs = p_info->usage.iov_cnt;
1921
1922        for (igu_sb_id = p_info->igu_dsb_id;
1923             igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
1924                p_block = &p_info->entry[igu_sb_id];
1925                val = 0;
1926
1927                if (!(p_block->status & QED_IGU_STATUS_VALID))
1928                        continue;
1929
1930                if (p_block->status & QED_IGU_STATUS_DSB) {
1931                        p_block->function_id = p_hwfn->rel_pf_id;
1932                        p_block->is_pf = 1;
1933                        p_block->vector_number = 0;
1934                        p_block->status = QED_IGU_STATUS_VALID |
1935                                          QED_IGU_STATUS_PF |
1936                                          QED_IGU_STATUS_DSB;
1937                } else if (pf_sbs) {
1938                        pf_sbs--;
1939                        p_block->function_id = p_hwfn->rel_pf_id;
1940                        p_block->is_pf = 1;
1941                        p_block->vector_number = p_info->usage.cnt - pf_sbs;
1942                        p_block->status = QED_IGU_STATUS_VALID |
1943                                          QED_IGU_STATUS_PF |
1944                                          QED_IGU_STATUS_FREE;
1945                } else if (vf_sbs) {
1946                        p_block->function_id =
1947                            p_hwfn->cdev->p_iov_info->first_vf_in_pf +
1948                            p_info->usage.iov_cnt - vf_sbs;
1949                        p_block->is_pf = 0;
1950                        p_block->vector_number = 0;
1951                        p_block->status = QED_IGU_STATUS_VALID |
1952                                          QED_IGU_STATUS_FREE;
1953                        vf_sbs--;
1954                } else {
1955                        p_block->function_id = 0;
1956                        p_block->is_pf = 0;
1957                        p_block->vector_number = 0;
1958                }
1959
1960                SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
1961                          p_block->function_id);
1962                SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
1963                SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
1964                          p_block->vector_number);
1965
1966                /* VF entries would be enabled when the VF is initialized */
1967                SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
1968
1969                rval = qed_rd(p_hwfn, p_ptt,
1970                              IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
1971
1972                if (rval != val) {
1973                        qed_wr(p_hwfn, p_ptt,
1974                               IGU_REG_MAPPING_MEMORY +
1975                               sizeof(u32) * igu_sb_id, val);
1976
1977                        DP_VERBOSE(p_hwfn,
1978                                   NETIF_MSG_INTR,
1979                                   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
1980                                   igu_sb_id,
1981                                   p_block->function_id,
1982                                   p_block->is_pf,
1983                                   p_block->vector_number, rval, val);
1984                }
1985        }
1986
1987        return 0;
1988}
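
/* Editor's note: illustrative sketch, not part of the driver. Each IGU CAM
 * line packs the owning function, a PF/VF indication, the vector number and
 * a valid bit, exactly as encoded in the loop above; a helper mirroring that
 * packing with the same SET_FIELD macros:
 */
static u32 example_igu_mapping_line(u8 function_id, bool is_pf,
                                    u8 vector_number, bool valid)
{
        u32 val = 0;

        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, function_id);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, is_pf);
        SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, vector_number);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, valid);

        return val;
}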
1989
1990static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
1991                                       struct qed_ptt *p_ptt, u16 igu_sb_id)
1992{
1993        u32 val = qed_rd(p_hwfn, p_ptt,
1994                         IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
1995        struct qed_igu_block *p_block;
1996
1997        p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
1998
1999        /* Fill the block information */
2000        p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
2001        p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2002        p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
2003        p_block->igu_sb_id = igu_sb_id;
2004}
2005
2006int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2007{
2008        struct qed_igu_info *p_igu_info;
2009        struct qed_igu_block *p_block;
2010        u32 min_vf = 0, max_vf = 0;
2011        u16 igu_sb_id;
2012
2013        p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
2014        if (!p_hwfn->hw_info.p_igu_info)
2015                return -ENOMEM;
2016
2017        p_igu_info = p_hwfn->hw_info.p_igu_info;
2018
2019        /* Distinguish between existent and non-existent default SB */
2020        p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;
2021
2022        /* Find the range of VF ids whose SB belong to this PF */
2023        if (p_hwfn->cdev->p_iov_info) {
2024                struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
2025
2026                min_vf  = p_iov->first_vf_in_pf;
2027                max_vf  = p_iov->first_vf_in_pf + p_iov->total_vfs;
2028        }
2029
2030        for (igu_sb_id = 0;
2031             igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
2032                /* Read current entry; Notice it might not belong to this PF */
2033                qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
2034                p_block = &p_igu_info->entry[igu_sb_id];
2035
2036                if ((p_block->is_pf) &&
2037                    (p_block->function_id == p_hwfn->rel_pf_id)) {
2038                        p_block->status = QED_IGU_STATUS_PF |
2039                                          QED_IGU_STATUS_VALID |
2040                                          QED_IGU_STATUS_FREE;
2041
2042                        if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
2043                                p_igu_info->usage.cnt++;
2044                } else if (!(p_block->is_pf) &&
2045                           (p_block->function_id >= min_vf) &&
2046                           (p_block->function_id < max_vf)) {
2047                        /* Available for VFs of this PF */
2048                        p_block->status = QED_IGU_STATUS_VALID |
2049                                          QED_IGU_STATUS_FREE;
2050
2051                        if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
2052                                p_igu_info->usage.iov_cnt++;
2053                }
2054
2055                /* Mark the First entry belonging to the PF or its VFs
2056                 * as the default SB [we'll reset IGU prior to first usage].
2057                 */
2058                if ((p_block->status & QED_IGU_STATUS_VALID) &&
2059                    (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
2060                        p_igu_info->igu_dsb_id = igu_sb_id;
2061                        p_block->status |= QED_IGU_STATUS_DSB;
2062                }
2063
2064                /* Limit the number of prints by having each PF print only
2065                 * its own entries, with the exception of PF0 which prints
2066                 * everything.
2067                 */
2068                if ((p_block->status & QED_IGU_STATUS_VALID) ||
2069                    (p_hwfn->abs_pf_id == 0)) {
2070                        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2071                                   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
2072                                   igu_sb_id, p_block->function_id,
2073                                   p_block->is_pf, p_block->vector_number);
2074                }
2075        }
2076
2077        if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
2078                DP_NOTICE(p_hwfn,
2079                          "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
2080                          p_igu_info->igu_dsb_id);
2081                return -EINVAL;
2082        }
2083
2084        /* All non default SB are considered free at this point */
2085        p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
2086        p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
2087
2088        DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
2089                   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
2090                   p_igu_info->igu_dsb_id,
2091                   p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);
2092
2093        return 0;
2094}
2095
2096/**
2097 * @brief Initialize IGU runtime registers
2098 *
2099 * @param p_hwfn - HW function whose IGU runtime registers are initialized
2100 */
2101void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
2102{
2103        u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2104
2105        STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2106}
2107
2108u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
2109{
2110        u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
2111                               IGU_CMD_INT_ACK_BASE;
2112        u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
2113                               IGU_CMD_INT_ACK_BASE;
2114        u32 intr_status_hi = 0, intr_status_lo = 0;
2115        u64 intr_status = 0;
2116
2117        intr_status_lo = REG_RD(p_hwfn,
2118                                GTT_BAR0_MAP_REG_IGU_CMD +
2119                                lsb_igu_cmd_addr * 8);
2120        intr_status_hi = REG_RD(p_hwfn,
2121                                GTT_BAR0_MAP_REG_IGU_CMD +
2122                                msb_igu_cmd_addr * 8);
2123        intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2124
2125        return intr_status;
2126}
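
/* Editor's note: illustrative sketch, not part of the driver. The 64-bit
 * interrupt status above is simply the two 32-bit halves recombined:
 */
static u64 example_combine_status(u32 hi, u32 lo)
{
        return ((u64)hi << 32) | (u64)lo;
}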
2127
2128static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
2129{
2130        tasklet_init(p_hwfn->sp_dpc,
2131                     qed_int_sp_dpc, (unsigned long)p_hwfn);
2132        p_hwfn->b_sp_dpc_enabled = true;
2133}
2134
2135static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
2136{
2137        p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
2138        if (!p_hwfn->sp_dpc)
2139                return -ENOMEM;
2140
2141        return 0;
2142}
2143
2144static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
2145{
2146        kfree(p_hwfn->sp_dpc);
2147        p_hwfn->sp_dpc = NULL;
2148}
2149
2150int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2151{
2152        int rc = 0;
2153
2154        rc = qed_int_sp_dpc_alloc(p_hwfn);
2155        if (rc)
2156                return rc;
2157
2158        rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
2159        if (rc)
2160                return rc;
2161
2162        rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
2163
2164        return rc;
2165}
2166
2167void qed_int_free(struct qed_hwfn *p_hwfn)
2168{
2169        qed_int_sp_sb_free(p_hwfn);
2170        qed_int_sb_attn_free(p_hwfn);
2171        qed_int_sp_dpc_free(p_hwfn);
2172}
2173
2174void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2175{
2176        qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2177        qed_int_sb_attn_setup(p_hwfn, p_ptt);
2178        qed_int_sp_dpc_setup(p_hwfn);
2179}
2180
2181void qed_int_get_num_sbs(struct qed_hwfn        *p_hwfn,
2182                         struct qed_sb_cnt_info *p_sb_cnt_info)
2183{
2184        struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
2185
2186        if (!info || !p_sb_cnt_info)
2187                return;
2188
2189        memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
2190}
2191
2192void qed_int_disable_post_isr_release(struct qed_dev *cdev)
2193{
2194        int i;
2195
2196        for_each_hwfn(cdev, i)
2197                cdev->hwfns[i].b_int_requested = false;
2198}
2199
2200int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2201                          u8 timer_res, u16 sb_id, bool tx)
2202{
2203        struct cau_sb_entry sb_entry;
2204        int rc;
2205
2206        if (!p_hwfn->hw_init_done) {
2207                DP_ERR(p_hwfn, "hardware not initialized yet\n");
2208                return -EINVAL;
2209        }
2210
2211        rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2212                               sb_id * sizeof(u64),
2213                               (u64)(uintptr_t)&sb_entry, 2, 0);
2214        if (rc) {
2215                DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2216                return rc;
2217        }
2218
2219        if (tx)
2220                SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2221        else
2222                SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2223
2224        rc = qed_dmae_host2grc(p_hwfn, p_ptt,
2225                               (u64)(uintptr_t)&sb_entry,
2226                               CAU_REG_SB_VAR_MEMORY +
2227                               sb_id * sizeof(u64), 2, 0);
2228        if (rc) {
2229                DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
2230                return rc;
2231        }
2232
2233        return rc;
2234}
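
/* Editor's note: illustrative sketch, not part of the driver. A runtime
 * coalescing change could pick a resolution with the same thresholds used at
 * init time (see the example_pick_timer_res() sketch above) and then update
 * the CAU entry through qed_int_set_timer_res(); coalesce_usecs and sb_id
 * are placeholders:
 */
static int example_update_coalescing(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     u32 coalesce_usecs, u16 sb_id, bool tx)
{
        u8 timer_res = example_pick_timer_res(coalesce_usecs);

        return qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, tx);
}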
2235