linux/drivers/net/ethernet/qlogic/qed/qed_mcp.c
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <asm/byteorder.h>
   9#include <linux/delay.h>
  10#include <linux/errno.h>
  11#include <linux/kernel.h>
  12#include <linux/slab.h>
  13#include <linux/spinlock.h>
  14#include <linux/string.h>
  15#include <linux/etherdevice.h>
  16#include "qed.h"
  17#include "qed_cxt.h"
  18#include "qed_dcbx.h"
  19#include "qed_hsi.h"
  20#include "qed_hw.h"
  21#include "qed_mcp.h"
  22#include "qed_reg_addr.h"
  23#include "qed_sriov.h"
  24
  25#define GRCBASE_MCP     0xe00000
  26
  27#define QED_MCP_RESP_ITER_US    10
  28
  29#define QED_DRV_MB_MAX_RETRIES  (500 * 1000)    /* Account for 5 sec */
  30#define QED_MCP_RESET_RETRIES   (50 * 1000)     /* Account for 500 msec */
  31
  32#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)           \
  33        qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
  34               _val)
  35
  36#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
  37        qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
  38
  39#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
   40        DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
  41                     offsetof(struct public_drv_mb, _field), _val)
  42
  43#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)         \
  44        DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
  45                     offsetof(struct public_drv_mb, _field))
  46
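/* Driver compatibility version advertised in the PDA_COMP_VER field of the
 * LOAD_REQ param: FW major version in the low byte, FW minor version in the
 * byte above it.
 */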
  47#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
  48                  DRV_ID_PDA_COMP_VER_SHIFT)
  49
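/* Mbit-to-bytes conversion: with 1 Mbit taken as 2^20 bits, dividing by
 * 8 bits per byte leaves 2^17 bytes, i.e. a shift of 17.
 */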
  50#define MCP_BYTES_PER_MBIT_SHIFT 17
  51
  52bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
  53{
  54        if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
  55                return false;
  56        return true;
  57}
  58
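/* Resolve this PF's port section address inside the MCP public data. The
 * address is later used for reading port-scoped fields such as the link
 * and transceiver status.
 */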
  59void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
  60{
  61        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
  62                                        PUBLIC_PORT);
  63        u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
  64
  65        p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
  66                                                   MFW_PORT(p_hwfn));
  67        DP_VERBOSE(p_hwfn, QED_MSG_SP,
  68                   "port_addr = 0x%x, port_id 0x%02x\n",
  69                   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
  70}
  71
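/* Copy the MFW mailbox messages from SHMEM into the driver's current shadow
 * buffer. The message dwords follow the sup_msgs field, hence the extra
 * sizeof(u32) offset, and are stored big-endian by the MFW.
 */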
  72void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
  73{
  74        u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
  75        u32 tmp, i;
  76
  77        if (!p_hwfn->mcp_info->public_base)
  78                return;
  79
  80        for (i = 0; i < length; i++) {
  81                tmp = qed_rd(p_hwfn, p_ptt,
  82                             p_hwfn->mcp_info->mfw_mb_addr +
  83                             (i << 2) + sizeof(u32));
  84
   85                /* The MB data is big-endian; convert it to CPU byte order */
  86                ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
  87                        be32_to_cpu((__force __be32)tmp);
  88        }
  89}
  90
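/* An element in the list of pending mailbox commands. It pairs the caller's
 * mailbox parameters with the sequence number that the matching MFW
 * response is expected to carry.
 */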
  91struct qed_mcp_cmd_elem {
  92        struct list_head list;
  93        struct qed_mcp_mb_params *p_mb_params;
  94        u16 expected_seq_num;
  95        bool b_is_completed;
  96};
  97
  98/* Must be called while cmd_lock is acquired */
  99static struct qed_mcp_cmd_elem *
 100qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
 101                     struct qed_mcp_mb_params *p_mb_params,
 102                     u16 expected_seq_num)
 103{
 104        struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
 105
 106        p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
 107        if (!p_cmd_elem)
 108                goto out;
 109
 110        p_cmd_elem->p_mb_params = p_mb_params;
 111        p_cmd_elem->expected_seq_num = expected_seq_num;
 112        list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
 113out:
 114        return p_cmd_elem;
 115}
 116
 117/* Must be called while cmd_lock is acquired */
 118static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
 119                                 struct qed_mcp_cmd_elem *p_cmd_elem)
 120{
 121        list_del(&p_cmd_elem->list);
 122        kfree(p_cmd_elem);
 123}
 124
 125/* Must be called while cmd_lock is acquired */
 126static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
 127                                                     u16 seq_num)
 128{
 129        struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
 130
 131        list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
 132                if (p_cmd_elem->expected_seq_num == seq_num)
 133                        return p_cmd_elem;
 134        }
 135
 136        return NULL;
 137}
 138
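/* Release the mcp_info structure, its mailbox shadow buffers, and any
 * command list elements that are still pending.
 */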
 139int qed_mcp_free(struct qed_hwfn *p_hwfn)
 140{
 141        if (p_hwfn->mcp_info) {
 142                struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
 143
 144                kfree(p_hwfn->mcp_info->mfw_mb_cur);
 145                kfree(p_hwfn->mcp_info->mfw_mb_shadow);
 146
 147                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 148                list_for_each_entry_safe(p_cmd_elem,
 149                                         p_tmp,
 150                                         &p_hwfn->mcp_info->cmd_list, list) {
 151                        qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
 152                }
 153                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 154        }
 155
 156        kfree(p_hwfn->mcp_info);
 157        p_hwfn->mcp_info = NULL;
 158
 159        return 0;
 160}
 161
 162/* Maximum of 1 sec to wait for the SHMEM ready indication */
 163#define QED_MCP_SHMEM_RDY_MAX_RETRIES   20
 164#define QED_MCP_SHMEM_RDY_ITER_MS       50
 165
 166static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 167{
 168        struct qed_mcp_info *p_info = p_hwfn->mcp_info;
 169        u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
 170        u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
 171        u32 drv_mb_offsize, mfw_mb_offsize;
 172        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
 173
 174        p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
 175        if (!p_info->public_base) {
 176                DP_NOTICE(p_hwfn,
 177                          "The address of the MCP scratch-pad is not configured\n");
 178                return -EINVAL;
 179        }
 180
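        /* The SHMEM address read above is an offset within the MCP region;
         * turn it into an absolute GRC address.
         */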
 181        p_info->public_base |= GRCBASE_MCP;
 182
 183        /* Get the MFW MB address and number of supported messages */
 184        mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
 185                                SECTION_OFFSIZE_ADDR(p_info->public_base,
 186                                                     PUBLIC_MFW_MB));
 187        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
 188        p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
 189                                            p_info->mfw_mb_addr +
 190                                            offsetof(struct public_mfw_mb,
 191                                                     sup_msgs));
 192
  193        /* The driver can be notified of an MCP reset and might read the
  194         * SHMEM values before the MFW has completed initializing them.
 195         * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
 196         * data ready indication.
 197         */
 198        while (!p_info->mfw_mb_length && --cnt) {
 199                msleep(msec);
 200                p_info->mfw_mb_length =
 201                        (u16)qed_rd(p_hwfn, p_ptt,
 202                                    p_info->mfw_mb_addr +
 203                                    offsetof(struct public_mfw_mb, sup_msgs));
 204        }
 205
 206        if (!cnt) {
 207                DP_NOTICE(p_hwfn,
 208                          "Failed to get the SHMEM ready notification after %d msec\n",
 209                          QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
 210                return -EBUSY;
 211        }
 212
 213        /* Calculate the driver and MFW mailbox address */
 214        drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
 215                                SECTION_OFFSIZE_ADDR(p_info->public_base,
 216                                                     PUBLIC_DRV_MB));
 217        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
 218        DP_VERBOSE(p_hwfn, QED_MSG_SP,
 219                   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
 220                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
 221
 222        /* Get the current driver mailbox sequence before sending
 223         * the first command
 224         */
 225        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
 226                             DRV_MSG_SEQ_NUMBER_MASK;
 227
 228        /* Get current FW pulse sequence */
 229        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
 230                                DRV_PULSE_SEQ_MASK;
 231
 232        p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
 233
 234        return 0;
 235}
 236
 237int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 238{
 239        struct qed_mcp_info *p_info;
 240        u32 size;
 241
 242        /* Allocate mcp_info structure */
 243        p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
 244        if (!p_hwfn->mcp_info)
 245                goto err;
 246        p_info = p_hwfn->mcp_info;
 247
 248        /* Initialize the MFW spinlock */
 249        spin_lock_init(&p_info->cmd_lock);
 250        spin_lock_init(&p_info->link_lock);
 251
 252        INIT_LIST_HEAD(&p_info->cmd_list);
 253
 254        if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
 255                DP_NOTICE(p_hwfn, "MCP is not initialized\n");
  256                /* Do not free mcp_info here, since public_base indicates that
 257                 * the MCP is not initialized
 258                 */
 259                return 0;
 260        }
 261
 262        size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
 263        p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
 264        p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
 265        if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
 266                goto err;
 267
 268        return 0;
 269
 270err:
 271        qed_mcp_free(p_hwfn);
 272        return -ENOMEM;
 273}
 274
 275static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
 276                                   struct qed_ptt *p_ptt)
 277{
 278        u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
 279
 280        /* Use MCP history register to check if MCP reset occurred between init
 281         * time and now.
 282         */
 283        if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
 284                DP_VERBOSE(p_hwfn,
 285                           QED_MSG_SP,
 286                           "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
 287                           p_hwfn->mcp_info->mcp_hist, generic_por_0);
 288
 289                qed_load_mcp_offsets(p_hwfn, p_ptt);
 290                qed_mcp_cmd_port_init(p_hwfn, p_ptt);
 291        }
 292}
 293
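/* Ask the MFW to reset the MCP, then poll MISCS_REG_GENERIC_POR_0 until its
 * value changes, which indicates that the reset has actually taken place.
 */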
 294int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 295{
 296        u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
 297        int rc = 0;
 298
 299        if (p_hwfn->mcp_info->b_block_cmd) {
 300                DP_NOTICE(p_hwfn,
 301                          "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
 302                return -EBUSY;
 303        }
 304
 305        /* Ensure that only a single thread is accessing the mailbox */
 306        spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 307
 308        org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
 309
 310        /* Set drv command along with the updated sequence */
 311        qed_mcp_reread_offsets(p_hwfn, p_ptt);
 312        seq = ++p_hwfn->mcp_info->drv_mb_seq;
 313        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
 314
 315        do {
 316                /* Wait for MFW response */
 317                udelay(delay);
  318                /* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
 319        } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
 320                                              MISCS_REG_GENERIC_POR_0)) &&
 321                 (cnt++ < QED_MCP_RESET_RETRIES));
 322
 323        if (org_mcp_reset_seq !=
 324            qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
 325                DP_VERBOSE(p_hwfn, QED_MSG_SP,
 326                           "MCP was reset after %d usec\n", cnt * delay);
 327        } else {
 328                DP_ERR(p_hwfn, "Failed to reset MCP\n");
 329                rc = -EAGAIN;
 330        }
 331
 332        spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 333
 334        return rc;
 335}
 336
 337/* Must be called while cmd_lock is acquired */
 338static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
 339{
 340        struct qed_mcp_cmd_elem *p_cmd_elem;
 341
  342        /* There is at most one pending command at any given time, and if it
 343         * exists - it is placed at the HEAD of the list.
 344         */
 345        if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
 346                p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
 347                                              struct qed_mcp_cmd_elem, list);
 348                return !p_cmd_elem->b_is_completed;
 349        }
 350
 351        return false;
 352}
 353
 354/* Must be called while cmd_lock is acquired */
 355static int
 356qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 357{
 358        struct qed_mcp_mb_params *p_mb_params;
 359        struct qed_mcp_cmd_elem *p_cmd_elem;
 360        u32 mcp_resp;
 361        u16 seq_num;
 362
 363        mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
 364        seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
 365
  366        /* Return if no new unhandled response has been received */
 367        if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
 368                return -EAGAIN;
 369
 370        p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
 371        if (!p_cmd_elem) {
 372                DP_ERR(p_hwfn,
 373                       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
 374                       seq_num);
 375                return -EINVAL;
 376        }
 377
 378        p_mb_params = p_cmd_elem->p_mb_params;
 379
 380        /* Get the MFW response along with the sequence number */
 381        p_mb_params->mcp_resp = mcp_resp;
 382
 383        /* Get the MFW param */
 384        p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
 385
 386        /* Get the union data */
 387        if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
 388                u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
 389                                      offsetof(struct public_drv_mb,
 390                                               union_data);
 391                qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
 392                                union_data_addr, p_mb_params->data_dst_size);
 393        }
 394
 395        p_cmd_elem->b_is_completed = true;
 396
 397        return 0;
 398}
 399
 400/* Must be called while cmd_lock is acquired */
 401static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 402                                    struct qed_ptt *p_ptt,
 403                                    struct qed_mcp_mb_params *p_mb_params,
 404                                    u16 seq_num)
 405{
 406        union drv_union_data union_data;
 407        u32 union_data_addr;
 408
 409        /* Set the union data */
 410        union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
 411                          offsetof(struct public_drv_mb, union_data);
 412        memset(&union_data, 0, sizeof(union_data));
 413        if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
 414                memcpy(&union_data, p_mb_params->p_data_src,
 415                       p_mb_params->data_src_size);
 416        qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
 417                      sizeof(union_data));
 418
 419        /* Set the drv param */
 420        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
 421
 422        /* Set the drv command along with the sequence number */
 423        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
 424
 425        DP_VERBOSE(p_hwfn, QED_MSG_SP,
 426                   "MFW mailbox: command 0x%08x param 0x%08x\n",
 427                   (p_mb_params->cmd | seq_num), p_mb_params->param);
 428}
 429
 430static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
 431{
 432        p_hwfn->mcp_info->b_block_cmd = block_cmd;
 433
 434        DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
 435                block_cmd ? "Block" : "Unblock");
 436}
 437
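/* Dump the MCP CPU state, sampling the program counter three times with a
 * short delay in between so the log shows whether the MCP is advancing or
 * stuck.
 */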
 438static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
 439                                   struct qed_ptt *p_ptt)
 440{
 441        u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
 442        u32 delay = QED_MCP_RESP_ITER_US;
 443
 444        cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
 445        cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
 446        cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
 447        udelay(delay);
 448        cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
 449        udelay(delay);
 450        cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
 451
 452        DP_NOTICE(p_hwfn,
 453                  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
 454                  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
 455}
 456
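/* Send a mailbox command and wait for its response: poll until no earlier
 * command occupies the mailbox, write the command under cmd_lock, and then
 * poll for the matching sequence number. Each polling step is udelay(usecs),
 * or its msec equivalent when the caller set the CAN_SLEEP flag.
 */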
 457static int
 458_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 459                       struct qed_ptt *p_ptt,
 460                       struct qed_mcp_mb_params *p_mb_params,
 461                       u32 max_retries, u32 usecs)
 462{
 463        u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
 464        struct qed_mcp_cmd_elem *p_cmd_elem;
 465        u16 seq_num;
 466        int rc = 0;
 467
  468        /* Wait until the mailbox is not occupied by a previous command */
 469        do {
 470                /* Exit the loop if there is no pending command, or if the
 471                 * pending command is completed during this iteration.
 472                 * The spinlock stays locked until the command is sent.
 473                 */
 474
 475                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 476
 477                if (!qed_mcp_has_pending_cmd(p_hwfn))
 478                        break;
 479
 480                rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
 481                if (!rc)
 482                        break;
 483                else if (rc != -EAGAIN)
 484                        goto err;
 485
 486                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 487
 488                if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
 489                        msleep(msecs);
 490                else
 491                        udelay(usecs);
 492        } while (++cnt < max_retries);
 493
 494        if (cnt >= max_retries) {
 495                DP_NOTICE(p_hwfn,
 496                          "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
 497                          p_mb_params->cmd, p_mb_params->param);
 498                return -EAGAIN;
 499        }
 500
 501        /* Send the mailbox command */
 502        qed_mcp_reread_offsets(p_hwfn, p_ptt);
 503        seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
 504        p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
 505        if (!p_cmd_elem) {
 506                rc = -ENOMEM;
 507                goto err;
 508        }
 509
 510        __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
 511        spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 512
 513        /* Wait for the MFW response */
 514        do {
 515                /* Exit the loop if the command is already completed, or if the
 516                 * command is completed during this iteration.
 517                 * The spinlock stays locked until the list element is removed.
 518                 */
 519
 520                if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
 521                        msleep(msecs);
 522                else
 523                        udelay(usecs);
 524
 525                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 526
 527                if (p_cmd_elem->b_is_completed)
 528                        break;
 529
 530                rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
 531                if (!rc)
 532                        break;
 533                else if (rc != -EAGAIN)
 534                        goto err;
 535
 536                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 537        } while (++cnt < max_retries);
 538
 539        if (cnt >= max_retries) {
 540                DP_NOTICE(p_hwfn,
 541                          "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
 542                          p_mb_params->cmd, p_mb_params->param);
 543                qed_mcp_print_cpu_info(p_hwfn, p_ptt);
 544
 545                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 546                qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
 547                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 548
 549                if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
 550                        qed_mcp_cmd_set_blocking(p_hwfn, true);
 551
 552                qed_hw_err_notify(p_hwfn, p_ptt,
 553                                  QED_HW_ERR_MFW_RESP_FAIL, NULL);
 554                return -EAGAIN;
 555        }
 556
 557        qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
 558        spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 559
 560        DP_VERBOSE(p_hwfn,
 561                   QED_MSG_SP,
 562                   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
 563                   p_mb_params->mcp_resp,
 564                   p_mb_params->mcp_param,
 565                   (cnt * usecs) / 1000, (cnt * usecs) % 1000);
 566
 567        /* Clear the sequence number from the MFW response */
 568        p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
 569
 570        return 0;
 571
 572err:
 573        spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 574        return rc;
 575}
 576
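/* Validate a mailbox request and issue it. When the caller can sleep, the
 * polling is done in msleep(10) steps instead of udelay(10) ones - the retry
 * count is divided by 1000 and the delay multiplied by 1000, preserving the
 * overall 5 second budget.
 */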
 577static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 578                                 struct qed_ptt *p_ptt,
 579                                 struct qed_mcp_mb_params *p_mb_params)
 580{
 581        size_t union_data_size = sizeof(union drv_union_data);
 582        u32 max_retries = QED_DRV_MB_MAX_RETRIES;
 583        u32 usecs = QED_MCP_RESP_ITER_US;
 584
 585        /* MCP not initialized */
 586        if (!qed_mcp_is_init(p_hwfn)) {
 587                DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
 588                return -EBUSY;
 589        }
 590
 591        if (p_hwfn->mcp_info->b_block_cmd) {
 592                DP_NOTICE(p_hwfn,
 593                          "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
 594                          p_mb_params->cmd, p_mb_params->param);
 595                return -EBUSY;
 596        }
 597
 598        if (p_mb_params->data_src_size > union_data_size ||
 599            p_mb_params->data_dst_size > union_data_size) {
 600                DP_ERR(p_hwfn,
 601                       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
 602                       p_mb_params->data_src_size,
 603                       p_mb_params->data_dst_size, union_data_size);
 604                return -EINVAL;
 605        }
 606
 607        if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
 608                max_retries = DIV_ROUND_UP(max_retries, 1000);
 609                usecs *= 1000;
 610        }
 611
 612        return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
 613                                      usecs);
 614}
 615
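/* A minimal usage sketch (assumes a valid PTT and an initialized MCP):
 *
 *        u32 resp = 0, param = 0;
 *        int rc = qed_mcp_cmd(p_hwfn, p_ptt,
 *                             DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, 0,
 *                             &resp, &param);
 *
 * On success, resp carries the FW_MSG_CODE_* response and param the
 * command-specific response value.
 */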
 616int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
 617                struct qed_ptt *p_ptt,
 618                u32 cmd,
 619                u32 param,
 620                u32 *o_mcp_resp,
 621                u32 *o_mcp_param)
 622{
 623        struct qed_mcp_mb_params mb_params;
 624        int rc;
 625
 626        memset(&mb_params, 0, sizeof(mb_params));
 627        mb_params.cmd = cmd;
 628        mb_params.param = param;
 629
 630        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 631        if (rc)
 632                return rc;
 633
 634        *o_mcp_resp = mb_params.mcp_resp;
 635        *o_mcp_param = mb_params.mcp_param;
 636
 637        return 0;
 638}
 639
 640static int
 641qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
 642                   struct qed_ptt *p_ptt,
 643                   u32 cmd,
 644                   u32 param,
 645                   u32 *o_mcp_resp,
 646                   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
 647{
 648        struct qed_mcp_mb_params mb_params;
 649        int rc;
 650
 651        memset(&mb_params, 0, sizeof(mb_params));
 652        mb_params.cmd = cmd;
 653        mb_params.param = param;
 654        mb_params.p_data_src = i_buf;
 655        mb_params.data_src_size = (u8)i_txn_size;
 656        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 657        if (rc)
 658                return rc;
 659
 660        *o_mcp_resp = mb_params.mcp_resp;
 661        *o_mcp_param = mb_params.mcp_param;
 662
 663        /* nvm_info needs to be updated */
 664        p_hwfn->nvm_info.valid = false;
 665
 666        return 0;
 667}
 668
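/* Issue an NVM read mailbox command. The response param carries the number
 * of valid bytes in the union data buffer, which is then copied out to the
 * caller.
 */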
 669int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
 670                       struct qed_ptt *p_ptt,
 671                       u32 cmd,
 672                       u32 param,
 673                       u32 *o_mcp_resp,
 674                       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
 675{
 676        struct qed_mcp_mb_params mb_params;
 677        u8 raw_data[MCP_DRV_NVM_BUF_LEN];
 678        int rc;
 679
 680        memset(&mb_params, 0, sizeof(mb_params));
 681        mb_params.cmd = cmd;
 682        mb_params.param = param;
 683        mb_params.p_data_dst = raw_data;
 684
 685        /* Use the maximal value since the actual one is part of the response */
 686        mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
 687
 688        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 689        if (rc)
 690                return rc;
 691
 692        *o_mcp_resp = mb_params.mcp_resp;
 693        *o_mcp_param = mb_params.mcp_param;
 694
  695        *o_txn_size = *o_mcp_param;
  696        memcpy(o_buf, raw_data, min_t(u32, *o_txn_size, MCP_DRV_NVM_BUF_LEN));
 697
 698        return 0;
 699}
 700
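/* Decide whether the currently loaded driver may be forcibly replaced. By
 * default an OS driver may replace a preboot driver, and a kdump driver may
 * replace an OS driver; the override parameter can force either answer.
 */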
 701static bool
 702qed_mcp_can_force_load(u8 drv_role,
 703                       u8 exist_drv_role,
 704                       enum qed_override_force_load override_force_load)
 705{
 706        bool can_force_load = false;
 707
 708        switch (override_force_load) {
 709        case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
 710                can_force_load = true;
 711                break;
 712        case QED_OVERRIDE_FORCE_LOAD_NEVER:
 713                can_force_load = false;
 714                break;
 715        default:
 716                can_force_load = (drv_role == DRV_ROLE_OS &&
 717                                  exist_drv_role == DRV_ROLE_PREBOOT) ||
 718                                 (drv_role == DRV_ROLE_KDUMP &&
 719                                  exist_drv_role == DRV_ROLE_OS);
 720                break;
 721        }
 722
 723        return can_force_load;
 724}
 725
 726static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
 727                                   struct qed_ptt *p_ptt)
 728{
 729        u32 resp = 0, param = 0;
 730        int rc;
 731
 732        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
 733                         &resp, &param);
 734        if (rc)
 735                DP_NOTICE(p_hwfn,
 736                          "Failed to send cancel load request, rc = %d\n", rc);
 737
 738        return rc;
 739}
 740
 741#define CONFIG_QEDE_BITMAP_IDX          BIT(0)
 742#define CONFIG_QED_SRIOV_BITMAP_IDX     BIT(1)
 743#define CONFIG_QEDR_BITMAP_IDX          BIT(2)
 744#define CONFIG_QEDF_BITMAP_IDX          BIT(4)
 745#define CONFIG_QEDI_BITMAP_IDX          BIT(5)
 746#define CONFIG_QED_LL2_BITMAP_IDX       BIT(6)
 747
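/* Build a bitmap of the qed sub-modules enabled in this kernel build; it is
 * advertised to the MFW through the drv_ver_1 field of the load request.
 */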
 748static u32 qed_get_config_bitmap(void)
 749{
 750        u32 config_bitmap = 0x0;
 751
 752        if (IS_ENABLED(CONFIG_QEDE))
 753                config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
 754
 755        if (IS_ENABLED(CONFIG_QED_SRIOV))
 756                config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
 757
 758        if (IS_ENABLED(CONFIG_QED_RDMA))
 759                config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
 760
 761        if (IS_ENABLED(CONFIG_QED_FCOE))
 762                config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
 763
 764        if (IS_ENABLED(CONFIG_QED_ISCSI))
 765                config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
 766
 767        if (IS_ENABLED(CONFIG_QED_LL2))
 768                config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
 769
 770        return config_bitmap;
 771}
 772
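/* Driver-side representation of the LOAD_REQ exchange with the MFW. The out
 * params echo what the MFW reported about an already loaded driver, if any.
 */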
 773struct qed_load_req_in_params {
 774        u8 hsi_ver;
 775#define QED_LOAD_REQ_HSI_VER_DEFAULT    0
 776#define QED_LOAD_REQ_HSI_VER_1          1
 777        u32 drv_ver_0;
 778        u32 drv_ver_1;
 779        u32 fw_ver;
 780        u8 drv_role;
 781        u8 timeout_val;
 782        u8 force_cmd;
 783        bool avoid_eng_reset;
 784};
 785
 786struct qed_load_req_out_params {
 787        u32 load_code;
 788        u32 exist_drv_ver_0;
 789        u32 exist_drv_ver_1;
 790        u32 exist_fw_ver;
 791        u8 exist_drv_role;
 792        u8 mfw_hsi_ver;
 793        bool drv_exists;
 794};
 795
 796static int
 797__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 798                   struct qed_ptt *p_ptt,
 799                   struct qed_load_req_in_params *p_in_params,
 800                   struct qed_load_req_out_params *p_out_params)
 801{
 802        struct qed_mcp_mb_params mb_params;
 803        struct load_req_stc load_req;
 804        struct load_rsp_stc load_rsp;
 805        u32 hsi_ver;
 806        int rc;
 807
 808        memset(&load_req, 0, sizeof(load_req));
 809        load_req.drv_ver_0 = p_in_params->drv_ver_0;
 810        load_req.drv_ver_1 = p_in_params->drv_ver_1;
 811        load_req.fw_ver = p_in_params->fw_ver;
 812        QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
 813        QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
 814                          p_in_params->timeout_val);
 815        QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
 816                          p_in_params->force_cmd);
 817        QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
 818                          p_in_params->avoid_eng_reset);
 819
 820        hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
 821                  DRV_ID_MCP_HSI_VER_CURRENT :
 822                  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
 823
 824        memset(&mb_params, 0, sizeof(mb_params));
 825        mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
 826        mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
 827        mb_params.p_data_src = &load_req;
 828        mb_params.data_src_size = sizeof(load_req);
 829        mb_params.p_data_dst = &load_rsp;
 830        mb_params.data_dst_size = sizeof(load_rsp);
 831        mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 832
 833        DP_VERBOSE(p_hwfn, QED_MSG_SP,
 834                   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
 835                   mb_params.param,
 836                   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
 837                   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
 838                   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
 839                   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
 840
 841        if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
 842                DP_VERBOSE(p_hwfn, QED_MSG_SP,
 843                           "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
 844                           load_req.drv_ver_0,
 845                           load_req.drv_ver_1,
 846                           load_req.fw_ver,
 847                           load_req.misc0,
 848                           QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
 849                           QED_MFW_GET_FIELD(load_req.misc0,
 850                                             LOAD_REQ_LOCK_TO),
 851                           QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
 852                           QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
 853        }
 854
 855        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 856        if (rc) {
 857                DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
 858                return rc;
 859        }
 860
 861        DP_VERBOSE(p_hwfn, QED_MSG_SP,
 862                   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
 863        p_out_params->load_code = mb_params.mcp_resp;
 864
 865        if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
 866            p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
 867                DP_VERBOSE(p_hwfn,
 868                           QED_MSG_SP,
 869                           "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
 870                           load_rsp.drv_ver_0,
 871                           load_rsp.drv_ver_1,
 872                           load_rsp.fw_ver,
 873                           load_rsp.misc0,
 874                           QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
 875                           QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
 876                           QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
 877
 878                p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
 879                p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
 880                p_out_params->exist_fw_ver = load_rsp.fw_ver;
 881                p_out_params->exist_drv_role =
 882                    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
 883                p_out_params->mfw_hsi_ver =
 884                    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
 885                p_out_params->drv_exists =
 886                    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
 887                    LOAD_RSP_FLAGS0_DRV_EXISTS;
 888        }
 889
 890        return 0;
 891}
 892
  893static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
  894                                enum qed_drv_role drv_role,
  895                                u8 *p_mfw_drv_role)
 896{
 897        switch (drv_role) {
 898        case QED_DRV_ROLE_OS:
 899                *p_mfw_drv_role = DRV_ROLE_OS;
 900                break;
 901        case QED_DRV_ROLE_KDUMP:
 902                *p_mfw_drv_role = DRV_ROLE_KDUMP;
 903                break;
 904        default:
 905                DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
 906                return -EINVAL;
 907        }
 908
 909        return 0;
 910}
 911
 912enum qed_load_req_force {
 913        QED_LOAD_REQ_FORCE_NONE,
 914        QED_LOAD_REQ_FORCE_PF,
 915        QED_LOAD_REQ_FORCE_ALL,
 916};
 917
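/* Translate the driver's force-load policy into the MFW encoding used in
 * the LOAD_REQ misc0 field.
 */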
  918static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
 920                                  enum qed_load_req_force force_cmd,
 921                                  u8 *p_mfw_force_cmd)
 922{
 923        switch (force_cmd) {
 924        case QED_LOAD_REQ_FORCE_NONE:
 925                *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
 926                break;
 927        case QED_LOAD_REQ_FORCE_PF:
 928                *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
 929                break;
 930        case QED_LOAD_REQ_FORCE_ALL:
 931                *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
 932                break;
 933        }
 934}
 935
 936int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 937                     struct qed_ptt *p_ptt,
 938                     struct qed_load_req_params *p_params)
 939{
 940        struct qed_load_req_out_params out_params;
 941        struct qed_load_req_in_params in_params;
 942        u8 mfw_drv_role, mfw_force_cmd;
 943        int rc;
 944
 945        memset(&in_params, 0, sizeof(in_params));
 946        in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
 947        in_params.drv_ver_1 = qed_get_config_bitmap();
 948        in_params.fw_ver = STORM_FW_VERSION;
  949        rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
 950        if (rc)
 951                return rc;
 952
 953        in_params.drv_role = mfw_drv_role;
 954        in_params.timeout_val = p_params->timeout_val;
 955        qed_get_mfw_force_cmd(p_hwfn,
 956                              QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
 957
 958        in_params.force_cmd = mfw_force_cmd;
 959        in_params.avoid_eng_reset = p_params->avoid_eng_reset;
 960
 961        memset(&out_params, 0, sizeof(out_params));
 962        rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
 963        if (rc)
 964                return rc;
 965
 966        /* First handle cases where another load request should/might be sent:
 967         * - MFW expects the old interface [HSI version = 1]
 968         * - MFW responds that a force load request is required
 969         */
 970        if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
 971                DP_INFO(p_hwfn,
 972                        "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
 973
 974                in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
 975                memset(&out_params, 0, sizeof(out_params));
 976                rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
 977                if (rc)
 978                        return rc;
 979        } else if (out_params.load_code ==
 980                   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
 981                if (qed_mcp_can_force_load(in_params.drv_role,
 982                                           out_params.exist_drv_role,
 983                                           p_params->override_force_load)) {
 984                        DP_INFO(p_hwfn,
 985                                "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
 986                                in_params.drv_role, in_params.fw_ver,
 987                                in_params.drv_ver_0, in_params.drv_ver_1,
 988                                out_params.exist_drv_role,
 989                                out_params.exist_fw_ver,
 990                                out_params.exist_drv_ver_0,
 991                                out_params.exist_drv_ver_1);
 992
 993                        qed_get_mfw_force_cmd(p_hwfn,
 994                                              QED_LOAD_REQ_FORCE_ALL,
 995                                              &mfw_force_cmd);
 996
 997                        in_params.force_cmd = mfw_force_cmd;
 998                        memset(&out_params, 0, sizeof(out_params));
 999                        rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
1000                                                &out_params);
1001                        if (rc)
1002                                return rc;
1003                } else {
1004                        DP_NOTICE(p_hwfn,
1005                                  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1006                                  in_params.drv_role, in_params.fw_ver,
1007                                  in_params.drv_ver_0, in_params.drv_ver_1,
1008                                  out_params.exist_drv_role,
1009                                  out_params.exist_fw_ver,
1010                                  out_params.exist_drv_ver_0,
1011                                  out_params.exist_drv_ver_1);
1012                        DP_NOTICE(p_hwfn,
1013                                  "Avoid sending a force load request to prevent disruption of active PFs\n");
1014
1015                        qed_mcp_cancel_load_req(p_hwfn, p_ptt);
1016                        return -EBUSY;
1017                }
1018        }
1019
1020        /* Now handle the other types of responses.
1021         * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1022         * expected here after the additional revised load requests were sent.
1023         */
1024        switch (out_params.load_code) {
1025        case FW_MSG_CODE_DRV_LOAD_ENGINE:
1026        case FW_MSG_CODE_DRV_LOAD_PORT:
1027        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1028                if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
1029                    out_params.drv_exists) {
1030                        /* The role and fw/driver version match, but the PF is
1031                         * already loaded and has not been unloaded gracefully.
1032                         */
1033                        DP_NOTICE(p_hwfn,
1034                                  "PF is already loaded\n");
1035                        return -EINVAL;
1036                }
1037                break;
1038        default:
1039                DP_NOTICE(p_hwfn,
1040                          "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1041                          out_params.load_code);
1042                return -EBUSY;
1043        }
1044
1045        p_params->load_code = out_params.load_code;
1046
1047        return 0;
1048}
1049
1050int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1051{
1052        u32 resp = 0, param = 0;
1053        int rc;
1054
1055        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1056                         &param);
1057        if (rc) {
1058                DP_NOTICE(p_hwfn,
1059                          "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1060                return rc;
1061        }
1062
1063        /* Check if there is a DID mismatch between nvm-cfg/efuse */
1064        if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1065                DP_NOTICE(p_hwfn,
1066                          "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1067
1068        return 0;
1069}
1070
1071int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1072{
1073        struct qed_mcp_mb_params mb_params;
1074        u32 wol_param;
1075
1076        switch (p_hwfn->cdev->wol_config) {
1077        case QED_OV_WOL_DISABLED:
1078                wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1079                break;
1080        case QED_OV_WOL_ENABLED:
1081                wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1082                break;
1083        default:
1084                DP_NOTICE(p_hwfn,
1085                          "Unknown WoL configuration %02x\n",
1086                          p_hwfn->cdev->wol_config);
1087                fallthrough;
1088        case QED_OV_WOL_DEFAULT:
1089                wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1090        }
1091
1092        memset(&mb_params, 0, sizeof(mb_params));
1093        mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
1094        mb_params.param = wol_param;
1095        mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
1096
1097        return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1098}
1099
1100int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1101{
1102        struct qed_mcp_mb_params mb_params;
1103        struct mcp_mac wol_mac;
1104
1105        memset(&mb_params, 0, sizeof(mb_params));
1106        mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1107
1108        /* Set the primary MAC if WoL is enabled */
1109        if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
1110                u8 *p_mac = p_hwfn->cdev->wol_mac;
1111
1112                memset(&wol_mac, 0, sizeof(wol_mac));
1113                wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1114                wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1115                                    p_mac[4] << 8 | p_mac[5];
1116
1117                DP_VERBOSE(p_hwfn,
1118                           (QED_MSG_SP | NETIF_MSG_IFDOWN),
1119                           "Setting WoL MAC: %pM --> [%08x,%08x]\n",
1120                           p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
1121
1122                mb_params.p_data_src = &wol_mac;
1123                mb_params.data_src_size = sizeof(wol_mac);
1124        }
1125
1126        return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1127}
1128
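/* Handle a VF-FLR notification from the MFW: read the per-path bitmap of
 * disabled VFs from SHMEM and, if any were FLR-ed, schedule the IOV
 * workqueue to clean them up.
 */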
1129static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
1130                                  struct qed_ptt *p_ptt)
1131{
1132        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1133                                        PUBLIC_PATH);
1134        u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1135        u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1136                                     QED_PATH_ID(p_hwfn));
1137        u32 disabled_vfs[VF_MAX_STATIC / 32];
1138        int i;
1139
1140        DP_VERBOSE(p_hwfn,
1141                   QED_MSG_SP,
1142                   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1143                   mfw_path_offsize, path_addr);
1144
1145        for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1146                disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
1147                                         path_addr +
1148                                         offsetof(struct public_path,
1149                                                  mcp_vf_disabled) +
1150                                         sizeof(u32) * i);
1151                DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1152                           "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1153                           i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1154        }
1155
1156        if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1157                qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
1158}
1159
1160int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
1161                       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
1162{
1163        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1164                                        PUBLIC_FUNC);
1165        u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
1166        u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1167                                     MCP_PF_ID(p_hwfn));
1168        struct qed_mcp_mb_params mb_params;
1169        int rc;
1170        int i;
1171
1172        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1173                DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1174                           "Acking VFs [%08x,...,%08x] - %08x\n",
1175                           i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1176
1177        memset(&mb_params, 0, sizeof(mb_params));
1178        mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1179        mb_params.p_data_src = vfs_to_ack;
1180        mb_params.data_src_size = VF_MAX_STATIC / 8;
1181        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1182        if (rc) {
1183                DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
1184                return -EBUSY;
1185        }
1186
1187        /* Clear the ACK bits */
1188        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1189                qed_wr(p_hwfn, p_ptt,
1190                       func_addr +
1191                       offsetof(struct public_func, drv_ack_vf_disabled) +
1192                       i * sizeof(u32), 0);
1193
1194        return rc;
1195}
1196
1197static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
1198                                              struct qed_ptt *p_ptt)
1199{
1200        u32 transceiver_state;
1201
1202        transceiver_state = qed_rd(p_hwfn, p_ptt,
1203                                   p_hwfn->mcp_info->port_addr +
1204                                   offsetof(struct public_port,
1205                                            transceiver_data));
1206
1207        DP_VERBOSE(p_hwfn,
1208                   (NETIF_MSG_HW | QED_MSG_SP),
1209                   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1210                   transceiver_state,
1211                   (u32)(p_hwfn->mcp_info->port_addr +
1212                          offsetof(struct public_port, transceiver_data)));
1213
1214        transceiver_state = GET_FIELD(transceiver_state,
1215                                      ETH_TRANSCEIVER_STATE);
1216
1217        if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1218                DP_NOTICE(p_hwfn, "Transceiver is present.\n");
1219        else
1220                DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
1221}
1222
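/* Translate the port's eee_status SHMEM word into the driver's link state:
 * whether EEE is active, plus the locally and link-partner advertised
 * capabilities.
 */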
1223static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
1224                                    struct qed_ptt *p_ptt,
1225                                    struct qed_mcp_link_state *p_link)
1226{
1227        u32 eee_status, val;
1228
1229        p_link->eee_adv_caps = 0;
1230        p_link->eee_lp_adv_caps = 0;
1231        eee_status = qed_rd(p_hwfn,
1232                            p_ptt,
1233                            p_hwfn->mcp_info->port_addr +
1234                            offsetof(struct public_port, eee_status));
1235        p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1236        val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1237        if (val & EEE_1G_ADV)
1238                p_link->eee_adv_caps |= QED_EEE_1G_ADV;
1239        if (val & EEE_10G_ADV)
1240                p_link->eee_adv_caps |= QED_EEE_10G_ADV;
1241        val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1242        if (val & EEE_1G_ADV)
1243                p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
1244        if (val & EEE_10G_ADV)
1245                p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
1246}
1247
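/* Copy the given PF's public_func section out of SHMEM, bounded by the
 * section size encoded in the offsize word. Returns the number of bytes
 * read.
 */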
1248static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
1249                                  struct qed_ptt *p_ptt,
1250                                  struct public_func *p_data, int pfid)
1251{
1252        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1253                                        PUBLIC_FUNC);
 1254        u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
1255        u32 func_addr;
1256        u32 i, size;
1257
 1258        func_addr = SECTION_ADDR(mfw_func_offsize, pfid);
1259        memset(p_data, 0, sizeof(*p_data));
1260
 1261        size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_func_offsize));
1262        for (i = 0; i < size / sizeof(u32); i++)
1263                ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
1264                                            func_addr + (i << 2));
1265        return size;
1266}
1267
1268static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
1269                                  struct public_func *p_shmem_info)
1270{
1271        struct qed_mcp_function_info *p_info;
1272
1273        p_info = &p_hwfn->mcp_info->func_info;
1274
1275        p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
1276                                                  FUNC_MF_CFG_MIN_BW);
1277        if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1278                DP_INFO(p_hwfn,
1279                        "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1280                        p_info->bandwidth_min);
1281                p_info->bandwidth_min = 1;
1282        }
1283
1284        p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
1285                                                  FUNC_MF_CFG_MAX_BW);
1286        if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1287                DP_INFO(p_hwfn,
1288                        "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1289                        p_info->bandwidth_max);
1290                p_info->bandwidth_max = 100;
1291        }
1292}
1293
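/* Handle a link-change notification (or a reset of the link indications)
 * from the MFW. Runs under link_lock so that the SW-initiated and
 * attention-driven flows cannot race each other.
 */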
1294static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
1295                                       struct qed_ptt *p_ptt, bool b_reset)
1296{
1297        struct qed_mcp_link_state *p_link;
1298        u8 max_bw, min_bw;
1299        u32 status = 0;
1300
1301        /* Prevent SW/attentions from doing this at the same time */
1302        spin_lock_bh(&p_hwfn->mcp_info->link_lock);
1303
1304        p_link = &p_hwfn->mcp_info->link_output;
1305        memset(p_link, 0, sizeof(*p_link));
1306        if (!b_reset) {
1307                status = qed_rd(p_hwfn, p_ptt,
1308                                p_hwfn->mcp_info->port_addr +
1309                                offsetof(struct public_port, link_status));
1310                DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
1311                           "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1312                           status,
1313                           (u32)(p_hwfn->mcp_info->port_addr +
1314                                 offsetof(struct public_port, link_status)));
1315        } else {
1316                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1317                           "Resetting link indications\n");
1318                goto out;
1319        }
1320
1321        if (p_hwfn->b_drv_link_init) {
1322                /* Link indication with modern MFW arrives as per-PF
1323                 * indication.
1324                 */
1325                if (p_hwfn->mcp_info->capabilities &
1326                    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1327                        struct public_func shmem_info;
1328
1329                        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1330                                               MCP_PF_ID(p_hwfn));
1331                        p_link->link_up = !!(shmem_info.status &
1332                                             FUNC_STATUS_VIRTUAL_LINK_UP);
1333                        qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1334                        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1335                                   "Virtual link_up = %d\n", p_link->link_up);
1336                } else {
1337                        p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1338                        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1339                                   "Physical link_up = %d\n", p_link->link_up);
1340                }
1341        } else {
1342                p_link->link_up = false;
1343        }
1344
1345        p_link->full_duplex = true;
1346        switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1347        case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1348                p_link->speed = 100000;
1349                break;
1350        case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1351                p_link->speed = 50000;
1352                break;
1353        case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1354                p_link->speed = 40000;
1355                break;
1356        case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1357                p_link->speed = 25000;
1358                break;
1359        case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1360                p_link->speed = 20000;
1361                break;
1362        case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1363                p_link->speed = 10000;
1364                break;
1365        case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1366                p_link->full_duplex = false;
1367                fallthrough;
1368        case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1369                p_link->speed = 1000;
1370                break;
1371        default:
1372                p_link->speed = 0;
1373                p_link->link_up = false;
1374        }
1375
1376        if (p_link->link_up && p_link->speed)
1377                p_link->line_speed = p_link->speed;
1378        else
1379                p_link->line_speed = 0;
1380
1381        max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1382        min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1383
1384        /* Max bandwidth configuration */
1385        __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1386
1387        /* Min bandwidth configuration */
1388        __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1389        qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
1390                                            p_link->min_pf_rate);
1391
1392        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1393        p_link->an_complete = !!(status &
1394                                 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1395        p_link->parallel_detection = !!(status &
1396                                        LINK_STATUS_PARALLEL_DETECTION_USED);
1397        p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1398
1399        p_link->partner_adv_speed |=
1400                (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1401                QED_LINK_PARTNER_SPEED_1G_FD : 0;
1402        p_link->partner_adv_speed |=
1403                (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1404                QED_LINK_PARTNER_SPEED_1G_HD : 0;
1405        p_link->partner_adv_speed |=
1406                (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1407                QED_LINK_PARTNER_SPEED_10G : 0;
1408        p_link->partner_adv_speed |=
1409                (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1410                QED_LINK_PARTNER_SPEED_20G : 0;
1411        p_link->partner_adv_speed |=
1412                (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1413                QED_LINK_PARTNER_SPEED_25G : 0;
1414        p_link->partner_adv_speed |=
1415                (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1416                QED_LINK_PARTNER_SPEED_40G : 0;
1417        p_link->partner_adv_speed |=
1418                (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1419                QED_LINK_PARTNER_SPEED_50G : 0;
1420        p_link->partner_adv_speed |=
1421                (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1422                QED_LINK_PARTNER_SPEED_100G : 0;
1423
1424        p_link->partner_tx_flow_ctrl_en =
1425                !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1426        p_link->partner_rx_flow_ctrl_en =
1427                !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1428
1429        switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1430        case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1431                p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
1432                break;
1433        case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1434                p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
1435                break;
1436        case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1437                p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
1438                break;
1439        default:
1440                p_link->partner_adv_pause = 0;
1441        }
1442
1443        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1444
1445        if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1446                qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1447
1448        if (p_hwfn->mcp_info->capabilities &
1449            FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
1450                switch (status & LINK_STATUS_FEC_MODE_MASK) {
1451                case LINK_STATUS_FEC_MODE_NONE:
1452                        p_link->fec_active = QED_FEC_MODE_NONE;
1453                        break;
1454                case LINK_STATUS_FEC_MODE_FIRECODE_CL74:
1455                        p_link->fec_active = QED_FEC_MODE_FIRECODE;
1456                        break;
1457                case LINK_STATUS_FEC_MODE_RS_CL91:
1458                        p_link->fec_active = QED_FEC_MODE_RS;
1459                        break;
1460                default:
1461                        p_link->fec_active = QED_FEC_MODE_AUTO;
1462                }
1463        } else {
1464                p_link->fec_active = QED_FEC_MODE_UNSUPPORTED;
1465        }
1466
1467        qed_link_update(p_hwfn, p_ptt);
1468out:
1469        spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
1470}
1471
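/* Build an eth_phy_cfg from the stored link_input parameters (speed, pause,
 * EEE, FEC and extended-speed settings, each gated on the MFW's advertised
 * capabilities) and send it via INIT_PHY or LINK_RESET; a link-change
 * attention is then mimicked locally.
 */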
1472int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
1473{
1474        struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1475        struct qed_mcp_mb_params mb_params;
1476        struct eth_phy_cfg phy_cfg;
1477        u32 cmd, fec_bit = 0;
1478        u32 val, ext_speed;
1479        int rc = 0;
1480
1481        /* Set the shmem configuration according to params */
1482        memset(&phy_cfg, 0, sizeof(phy_cfg));
1483        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1484        if (!params->speed.autoneg)
1485                phy_cfg.speed = params->speed.forced_speed;
1486        phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1487        phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1488        phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1489        phy_cfg.adv_speed = params->speed.advertised_speeds;
1490        phy_cfg.loopback_mode = params->loopback_mode;
1491
1492        /* Some MFWs advertise this capability regardless of whether it is
1493         * actually feasible. Given that at the very least adv_caps would be
1494         * set internally by qed, configure EEE only when both the capability
1495         * and the request are present, so that LFA still works.
1496         */
1497        if ((p_hwfn->mcp_info->capabilities &
1498             FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
1499                phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1500                if (params->eee.tx_lpi_enable)
1501                        phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1502                if (params->eee.adv_caps & QED_EEE_1G_ADV)
1503                        phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1504                if (params->eee.adv_caps & QED_EEE_10G_ADV)
1505                        phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1506                phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1507                                    EEE_TX_TIMER_USEC_OFFSET) &
1508                                   EEE_TX_TIMER_USEC_MASK;
1509        }
1510
1511        if (p_hwfn->mcp_info->capabilities &
1512            FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
1513                if (params->fec & QED_FEC_MODE_NONE)
1514                        fec_bit |= FEC_FORCE_MODE_NONE;
1515                else if (params->fec & QED_FEC_MODE_FIRECODE)
1516                        fec_bit |= FEC_FORCE_MODE_FIRECODE;
1517                else if (params->fec & QED_FEC_MODE_RS)
1518                        fec_bit |= FEC_FORCE_MODE_RS;
1519                else if (params->fec & QED_FEC_MODE_AUTO)
1520                        fec_bit |= FEC_FORCE_MODE_AUTO;
1521
1522                SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit);
1523        }
1524
1525        if (p_hwfn->mcp_info->capabilities &
1526            FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
1527                ext_speed = 0;
1528                if (params->ext_speed.autoneg)
1529                        ext_speed |= ETH_EXT_SPEED_AN;
1530
1531                val = params->ext_speed.forced_speed;
1532                if (val & QED_EXT_SPEED_1G)
1533                        ext_speed |= ETH_EXT_SPEED_1G;
1534                if (val & QED_EXT_SPEED_10G)
1535                        ext_speed |= ETH_EXT_SPEED_10G;
1536                if (val & QED_EXT_SPEED_20G)
1537                        ext_speed |= ETH_EXT_SPEED_20G;
1538                if (val & QED_EXT_SPEED_25G)
1539                        ext_speed |= ETH_EXT_SPEED_25G;
1540                if (val & QED_EXT_SPEED_40G)
1541                        ext_speed |= ETH_EXT_SPEED_40G;
1542                if (val & QED_EXT_SPEED_50G_R)
1543                        ext_speed |= ETH_EXT_SPEED_50G_BASE_R;
1544                if (val & QED_EXT_SPEED_50G_R2)
1545                        ext_speed |= ETH_EXT_SPEED_50G_BASE_R2;
1546                if (val & QED_EXT_SPEED_100G_R2)
1547                        ext_speed |= ETH_EXT_SPEED_100G_BASE_R2;
1548                if (val & QED_EXT_SPEED_100G_R4)
1549                        ext_speed |= ETH_EXT_SPEED_100G_BASE_R4;
1550                if (val & QED_EXT_SPEED_100G_P4)
1551                        ext_speed |= ETH_EXT_SPEED_100G_BASE_P4;
1552
1553                SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED,
1554                              ext_speed);
1555
1556                ext_speed = 0;
1557
1558                val = params->ext_speed.advertised_speeds;
1559                if (val & QED_EXT_SPEED_MASK_1G)
1560                        ext_speed |= ETH_EXT_ADV_SPEED_1G;
1561                if (val & QED_EXT_SPEED_MASK_10G)
1562                        ext_speed |= ETH_EXT_ADV_SPEED_10G;
1563                if (val & QED_EXT_SPEED_MASK_20G)
1564                        ext_speed |= ETH_EXT_ADV_SPEED_20G;
1565                if (val & QED_EXT_SPEED_MASK_25G)
1566                        ext_speed |= ETH_EXT_ADV_SPEED_25G;
1567                if (val & QED_EXT_SPEED_MASK_40G)
1568                        ext_speed |= ETH_EXT_ADV_SPEED_40G;
1569                if (val & QED_EXT_SPEED_MASK_50G_R)
1570                        ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R;
1571                if (val & QED_EXT_SPEED_MASK_50G_R2)
1572                        ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2;
1573                if (val & QED_EXT_SPEED_MASK_100G_R2)
1574                        ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2;
1575                if (val & QED_EXT_SPEED_MASK_100G_R4)
1576                        ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4;
1577                if (val & QED_EXT_SPEED_MASK_100G_P4)
1578                        ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4;
1579
1580                phy_cfg.extended_speed |= ext_speed;
1581
1582                SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE,
1583                              params->ext_fec_mode);
1584        }
1585
1586        p_hwfn->b_drv_link_init = b_up;
1587
1588        if (b_up) {
1589                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1590                           "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
1591                           phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1592                           phy_cfg.loopback_mode, phy_cfg.fec_mode,
1593                           phy_cfg.extended_speed);
1594        } else {
1595                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n");
1596        }
1597
1598        memset(&mb_params, 0, sizeof(mb_params));
1599        mb_params.cmd = cmd;
1600        mb_params.p_data_src = &phy_cfg;
1601        mb_params.data_src_size = sizeof(phy_cfg);
1602        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1603
1604        /* If the MCP fails to respond, we must abort */
1605        if (rc) {
1606                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1607                return rc;
1608        }
1609
1610        /* Mimic a link-change attention, which is done for several reasons:
1611         *  - On reset, there's no guarantee the MFW would trigger
1612         *    an attention.
1613         *  - On initialization, older MFWs might not indicate link change
1614         *    during LFA, so we'd never get a link-up indication.
1615         */
1616        qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1617
1618        return 0;
1619}
1620
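/* Read the per-path process-kill counter from the PATH section of shmem. */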
1621u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
1622                                 struct qed_ptt *p_ptt)
1623{
1624        u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1625
1626        if (IS_VF(p_hwfn->cdev))
1627                return -EINVAL;
1628
1629        path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1630                                                 PUBLIC_PATH);
1631        path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
1632        path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
1633
1634        proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
1635                               path_addr +
1636                               offsetof(struct public_path, process_kill)) &
1637                        PROCESS_KILL_COUNTER_MASK;
1638
1639        return proc_kill_cnt;
1640}
1641
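/* Entry point for the MFW's process-kill (error recovery) indication:
 * interrupts are masked, and the leading hwfn marks recovery as being in
 * progress and schedules the recovery handler.
 */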
1642static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
1643                                        struct qed_ptt *p_ptt)
1644{
1645        struct qed_dev *cdev = p_hwfn->cdev;
1646        u32 proc_kill_cnt;
1647
1648        /* Prevent possible attentions/interrupts during recovery handling
1649         * and until its load phase, during which they will be re-enabled.
1650         */
1651        qed_int_igu_disable_int(p_hwfn, p_ptt);
1652
1653        DP_NOTICE(p_hwfn, "Received a process kill indication\n");
1654
1655        /* The following operations should be done once, and thus in CMT mode
1656         * are carried out by only the first HW function.
1657         */
1658        if (p_hwfn != QED_LEADING_HWFN(cdev))
1659                return;
1660
1661        if (cdev->recov_in_prog) {
1662                DP_NOTICE(p_hwfn,
1663                          "Ignoring the indication since a recovery process is already in progress\n");
1664                return;
1665        }
1666
1667        cdev->recov_in_prog = true;
1668
1669        proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
1670        DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
1671
1672        qed_schedule_recovery_handler(p_hwfn);
1673}
1674
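/* Collect the protocol statistics requested by the MFW from the driver and
 * send them back via the GET_STATS mailbox command.
 */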
1675static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
1676                                        struct qed_ptt *p_ptt,
1677                                        enum MFW_DRV_MSG_TYPE type)
1678{
1679        enum qed_mcp_protocol_type stats_type;
1680        union qed_mcp_protocol_stats stats;
1681        struct qed_mcp_mb_params mb_params;
1682        u32 hsi_param;
1683
1684        switch (type) {
1685        case MFW_DRV_MSG_GET_LAN_STATS:
1686                stats_type = QED_MCP_LAN_STATS;
1687                hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1688                break;
1689        case MFW_DRV_MSG_GET_FCOE_STATS:
1690                stats_type = QED_MCP_FCOE_STATS;
1691                hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1692                break;
1693        case MFW_DRV_MSG_GET_ISCSI_STATS:
1694                stats_type = QED_MCP_ISCSI_STATS;
1695                hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1696                break;
1697        case MFW_DRV_MSG_GET_RDMA_STATS:
1698                stats_type = QED_MCP_RDMA_STATS;
1699                hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1700                break;
1701        default:
1702                DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
1703                return;
1704        }
1705
1706        qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
1707
1708        memset(&mb_params, 0, sizeof(mb_params));
1709        mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1710        mb_params.param = hsi_param;
1711        mb_params.p_data_src = &stats;
1712        mb_params.data_src_size = sizeof(stats);
1713        qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1714}
1715
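/* Re-read the PF's min/max bandwidth from shmem, apply the new limits, and
 * acknowledge the MFW's BW_UPDATE notification.
 */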
1716static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1717{
1718        struct qed_mcp_function_info *p_info;
1719        struct public_func shmem_info;
1720        u32 resp = 0, param = 0;
1721
1722        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1723
1724        qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1725
1726        p_info = &p_hwfn->mcp_info->func_info;
1727
1728        qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
1729        qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
1730
1731        /* Acknowledge the MFW */
1732        qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1733                    &param);
1734}
1735
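/* Re-read the outer-VLAN (S-tag) value from shmem and, in OVLAN
 * classification mode, program the NIG/DORQ registers to match before
 * acknowledging the MFW.
 */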
1736static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1737{
1738        struct public_func shmem_info;
1739        u32 resp = 0, param = 0;
1740
1741        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1742
1743        p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1744                                                 FUNC_MF_CFG_OV_STAG_MASK;
1745        p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1746        if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
1747                if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
1748                        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
1749                               p_hwfn->hw_info.ovlan);
1750                        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
1751
1752                        /* Configure DB to add external vlan to EDPM packets */
1753                        qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1754                        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1755                               p_hwfn->hw_info.ovlan);
1756                } else {
1757                        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
1758                        qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
1759                        qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
1760                        qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
1761                }
1762
1763                qed_sp_pf_update_stag(p_hwfn);
1764        }
1765
1766        DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1767                   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1768
1769        /* Acknowledge the MFW */
1770        qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1771                    &resp, &param);
1772}
1773
1774static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
1775                                       struct qed_ptt *p_ptt)
1776{
1777        /* A single notification should be sent to the upper driver in CMT mode */
1778        if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
1779                return;
1780
1781        qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
1782                          "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1783}
1784
1785struct qed_mdump_cmd_params {
1786        u32 cmd;
1787        void *p_data_src;
1788        u8 data_src_size;
1789        void *p_data_dst;
1790        u8 data_dst_size;
1791        u32 mcp_resp;
1792};
1793
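/* Send a single mdump sub-command to the MFW, translating its "unsupported"
 * response codes into -EOPNOTSUPP.
 */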
1794static int
1795qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
1796                  struct qed_ptt *p_ptt,
1797                  struct qed_mdump_cmd_params *p_mdump_cmd_params)
1798{
1799        struct qed_mcp_mb_params mb_params;
1800        int rc;
1801
1802        memset(&mb_params, 0, sizeof(mb_params));
1803        mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1804        mb_params.param = p_mdump_cmd_params->cmd;
1805        mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1806        mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1807        mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1808        mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1809        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1810        if (rc)
1811                return rc;
1812
1813        p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1814
1815        if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1816                DP_INFO(p_hwfn,
1817                        "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1818                        p_mdump_cmd_params->cmd);
1819                rc = -EOPNOTSUPP;
1820        } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1821                DP_INFO(p_hwfn,
1822                        "The mdump command is not supported by the MFW\n");
1823                rc = -EOPNOTSUPP;
1824        }
1825
1826        return rc;
1827}
1828
1829static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1830{
1831        struct qed_mdump_cmd_params mdump_cmd_params;
1832
1833        memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
1834        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1835
1836        return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1837}
1838
1839int
1840qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
1841                         struct qed_ptt *p_ptt,
1842                         struct mdump_retain_data_stc *p_mdump_retain)
1843{
1844        struct qed_mdump_cmd_params mdump_cmd_params;
1845        int rc;
1846
1847        memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
1848        mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1849        mdump_cmd_params.p_data_dst = p_mdump_retain;
1850        mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
1851
1852        rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1853        if (rc)
1854                return rc;
1855
1856        if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1857                DP_INFO(p_hwfn,
1858                        "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1859                        mdump_cmd_params.mcp_resp);
1860                return -EINVAL;
1861        }
1862
1863        return 0;
1864}
1865
1866static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
1867                                          struct qed_ptt *p_ptt)
1868{
1869        struct mdump_retain_data_stc mdump_retain;
1870        int rc;
1871
1872        /* In CMT mode, no more than a single acknowledgment to the MFW and a
1873         * single notification to the upper driver are needed.
1874         */
1875        if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
1876                return;
1877
1878        rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1879        if (rc == 0 && mdump_retain.valid)
1880                DP_NOTICE(p_hwfn,
1881                          "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1882                          mdump_retain.epoch,
1883                          mdump_retain.pf, mdump_retain.status);
1884        else
1885                DP_NOTICE(p_hwfn,
1886                          "The MFW notified that a critical error occurred in the device\n");
1887
1888        DP_NOTICE(p_hwfn,
1889                  "Acknowledging the notification to prevent the MFW crash dump [driver debug data collection is preferable]\n");
1890        qed_mcp_mdump_ack(p_hwfn, p_ptt);
1891
1892        qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
1893}
1894
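/* Read the UFP configuration - channel type, scheduling mode, TC and host
 * priority control - from the port and function sections of shmem.
 */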
1895void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1896{
1897        struct public_func shmem_info;
1898        u32 port_cfg, val;
1899
1900        if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
1901                return;
1902
1903        memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1904        port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1905                          offsetof(struct public_port, oem_cfg_port));
1906        val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
1907                OEM_CFG_CHANNEL_TYPE_OFFSET;
1908        if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1909                DP_NOTICE(p_hwfn,
1910                          "Incorrect UFP Channel type %d port_id 0x%02x\n",
1911                          val, MFW_PORT(p_hwfn));
1912
1913        val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
1914        if (val == OEM_CFG_SCHED_TYPE_ETS) {
1915                p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
1916        } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
1917                p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
1918        } else {
1919                p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
1920                DP_NOTICE(p_hwfn,
1921                          "Unknown UFP scheduling mode %d port_id 0x%02x\n",
1922                          val, MFW_PORT(p_hwfn));
1923        }
1924
1925        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1926        val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
1927                OEM_CFG_FUNC_TC_OFFSET;
1928        p_hwfn->ufp_info.tc = (u8)val;
1929        val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
1930                OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
1931        if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
1932                p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
1933        } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
1934                p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
1935        } else {
1936                p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
1937                DP_NOTICE(p_hwfn,
1938                          "Unknown Host priority control %d port_id 0x%02x\n",
1939                          val, MFW_PORT(p_hwfn));
1940        }
1941
1942        DP_NOTICE(p_hwfn,
1943                  "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
1944                  p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1945                  p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
1946}
1947
1948static int
1949qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1950{
1951        qed_mcp_read_ufp_config(p_hwfn, p_ptt);
1952
1953        if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
1954                p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
1955                qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
1956                                           p_hwfn->ufp_info.tc);
1957
1958                qed_qm_reconf(p_hwfn, p_ptt);
1959        } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
1960                /* Merge UFP TC with the DCBX TC data */
1961                qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1962                                          QED_DCBX_OPERATIONAL_MIB);
1963        } else {
1964                DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
1965                return -EINVAL;
1966        }
1967
1968        /* Update storm FW with the negotiation results */
1969        qed_sp_pf_update_ufp(p_hwfn);
1970
1971        /* Update the S-tag PCP value */
1972        qed_sp_pf_update_stag(p_hwfn);
1973
1974        return 0;
1975}
1976
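/* Main MFW -> driver notification handler: read the mailbox, compare each
 * message against the shadow copy to detect new commands, dispatch them,
 * ACK everything back to the MFW, and finally refresh the shadow.
 */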
1977int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
1978                          struct qed_ptt *p_ptt)
1979{
1980        struct qed_mcp_info *info = p_hwfn->mcp_info;
1981        int rc = 0;
1982        bool found = false;
1983        u16 i;
1984
1985        DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
1986
1987        /* Read Messages from MFW */
1988        qed_mcp_read_mb(p_hwfn, p_ptt);
1989
1990        /* Compare current messages to old ones */
1991        for (i = 0; i < info->mfw_mb_length; i++) {
1992                if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1993                        continue;
1994
1995                found = true;
1996
1997                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1998                           "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1999                           i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
2000
2001                switch (i) {
2002                case MFW_DRV_MSG_LINK_CHANGE:
2003                        qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
2004                        break;
2005                case MFW_DRV_MSG_VF_DISABLED:
2006                        qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
2007                        break;
2008                case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2009                        qed_dcbx_mib_update_event(p_hwfn, p_ptt,
2010                                                  QED_DCBX_REMOTE_LLDP_MIB);
2011                        break;
2012                case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2013                        qed_dcbx_mib_update_event(p_hwfn, p_ptt,
2014                                                  QED_DCBX_REMOTE_MIB);
2015                        break;
2016                case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2017                        qed_dcbx_mib_update_event(p_hwfn, p_ptt,
2018                                                  QED_DCBX_OPERATIONAL_MIB);
2019                        break;
2020                case MFW_DRV_MSG_OEM_CFG_UPDATE:
2021                        qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
2022                        break;
2023                case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2024                        qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2025                        break;
2026                case MFW_DRV_MSG_ERROR_RECOVERY:
2027                        qed_mcp_handle_process_kill(p_hwfn, p_ptt);
2028                        break;
2029                case MFW_DRV_MSG_GET_LAN_STATS:
2030                case MFW_DRV_MSG_GET_FCOE_STATS:
2031                case MFW_DRV_MSG_GET_ISCSI_STATS:
2032                case MFW_DRV_MSG_GET_RDMA_STATS:
2033                        qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2034                        break;
2035                case MFW_DRV_MSG_BW_UPDATE:
2036                        qed_mcp_update_bw(p_hwfn, p_ptt);
2037                        break;
2038                case MFW_DRV_MSG_S_TAG_UPDATE:
2039                        qed_mcp_update_stag(p_hwfn, p_ptt);
2040                        break;
2041                case MFW_DRV_MSG_FAILURE_DETECTED:
2042                        qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
2043                        break;
2044                case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2045                        qed_mcp_handle_critical_error(p_hwfn, p_ptt);
2046                        break;
2047                case MFW_DRV_MSG_GET_TLV_REQ:
2048                        qed_mfw_tlv_req(p_hwfn);
2049                        break;
2050                default:
2051                        DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2052                        rc = -EINVAL;
2053                }
2054        }
2055
2056        /* ACK everything */
2057        for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2058                __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
2059
2060                /* The MFW expects the answer in BE, so force the write in that format */
2061                qed_wr(p_hwfn, p_ptt,
2062                       info->mfw_mb_addr + sizeof(u32) +
2063                       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2064                       sizeof(u32) + i * sizeof(u32),
2065                       (__force u32)val);
2066        }
2067
2068        if (!found) {
2069                DP_NOTICE(p_hwfn,
2070                          "Received an MFW message indication but no new message!\n");
2071                rc = -EINVAL;
2072        }
2073
2074        /* Copy the new mfw messages into the shadow */
2075        memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2076
2077        return rc;
2078}
2079
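/* Retrieve the MFW version: VFs take it from the ACQUIRE response, while
 * PFs read it - and optionally the running bundle id - from the GLOBAL
 * section of shmem.
 */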
2080int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
2081                        struct qed_ptt *p_ptt,
2082                        u32 *p_mfw_ver, u32 *p_running_bundle_id)
2083{
2084        u32 global_offsize;
2085
2086        if (IS_VF(p_hwfn->cdev)) {
2087                if (p_hwfn->vf_iov_info) {
2088                        struct pfvf_acquire_resp_tlv *p_resp;
2089
2090                        p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2091                        *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2092                        return 0;
2093                } else {
2094                        DP_VERBOSE(p_hwfn,
2095                                   QED_MSG_IOV,
2096                                   "VF requested MFW version prior to ACQUIRE\n");
2097                        return -EINVAL;
2098                }
2099        }
2100
2101        global_offsize = qed_rd(p_hwfn, p_ptt,
2102                                SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
2103                                                     public_base,
2104                                                     PUBLIC_GLOBAL));
2105        *p_mfw_ver =
2106            qed_rd(p_hwfn, p_ptt,
2107                   SECTION_ADDR(global_offsize, 0) +
2108                   offsetof(struct public_global, mfw_ver));
2109
2110        if (p_running_bundle_id) {
2111                *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
2112                                              SECTION_ADDR(global_offsize, 0) +
2113                                              offsetof(struct public_global,
2114                                                       running_bundle_id));
2115        }
2116
2117        return 0;
2118}
2119
2120int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
2121                        struct qed_ptt *p_ptt, u32 *p_mbi_ver)
2122{
2123        u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2124
2125        if (IS_VF(p_hwfn->cdev))
2126                return -EINVAL;
2127
2128        /* Read the address of the nvm_cfg */
2129        nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2130        if (!nvm_cfg_addr) {
2131                DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
2132                return -EINVAL;
2133        }
2134
2135        /* Read the offset of nvm_cfg1 */
2136        nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2137
2138        mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2139                       offsetof(struct nvm_cfg1, glob) +
2140                       offsetof(struct nvm_cfg1_glob, mbi_version);
2141        *p_mbi_ver = qed_rd(p_hwfn, p_ptt,
2142                            mbi_ver_addr) &
2143                     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2144                      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2145                      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2146
2147        return 0;
2148}
2149
2150int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
2151                           struct qed_ptt *p_ptt, u32 *p_media_type)
2152{
2153        *p_media_type = MEDIA_UNSPECIFIED;
2154
2155        if (IS_VF(p_hwfn->cdev))
2156                return -EINVAL;
2157
2158        if (!qed_mcp_is_init(p_hwfn)) {
2159                DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2160                return -EBUSY;
2161        }
2162
2163        if (!p_ptt)
2164                return -EINVAL;
2167
2168        *p_media_type = qed_rd(p_hwfn, p_ptt,
2169                               p_hwfn->mcp_info->port_addr +
2170                               offsetof(struct public_port,
2171                                        media_type));
2172
2173        return 0;
2174}
2175
2176int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
2177                                 struct qed_ptt *p_ptt,
2178                                 u32 *p_transceiver_state,
2179                                 u32 *p_transceiver_type)
2180{
2181        u32 transceiver_info;
2182
2183        *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
2184        *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
2185
2186        if (IS_VF(p_hwfn->cdev))
2187                return -EINVAL;
2188
2189        if (!qed_mcp_is_init(p_hwfn)) {
2190                DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2191                return -EBUSY;
2192        }
2193
2194        transceiver_info = qed_rd(p_hwfn, p_ptt,
2195                                  p_hwfn->mcp_info->port_addr +
2196                                  offsetof(struct public_port,
2197                                           transceiver_data));
2198
2199        *p_transceiver_state = (transceiver_info &
2200                                ETH_TRANSCEIVER_STATE_MASK) >>
2201                                ETH_TRANSCEIVER_STATE_OFFSET;
2202
2203        if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
2204                *p_transceiver_type = (transceiver_info &
2205                                       ETH_TRANSCEIVER_TYPE_MASK) >>
2206                                       ETH_TRANSCEIVER_TYPE_OFFSET;
2207        else
2208                *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
2209
2210        return 0;
2211}

2212static bool qed_is_transceiver_ready(u32 transceiver_state,
2213                                     u32 transceiver_type)
2214{
2215        if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2216            ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2217            (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
2218                return true;
2219
2220        return false;
2221}
2222
2223int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
2224                             struct qed_ptt *p_ptt, u32 *p_speed_mask)
2225{
2226        u32 transceiver_type, transceiver_state;
2227        int ret;
2228
2229        ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2230                                           &transceiver_type);
2231        if (ret)
2232                return ret;
2233
2234        if (!qed_is_transceiver_ready(transceiver_state, transceiver_type))
2235                return -EINVAL;
2237
2238        switch (transceiver_type) {
2239        case ETH_TRANSCEIVER_TYPE_1G_LX:
2240        case ETH_TRANSCEIVER_TYPE_1G_SX:
2241        case ETH_TRANSCEIVER_TYPE_1G_PCC:
2242        case ETH_TRANSCEIVER_TYPE_1G_ACC:
2243        case ETH_TRANSCEIVER_TYPE_1000BASET:
2244                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2245                break;
2246        case ETH_TRANSCEIVER_TYPE_10G_SR:
2247        case ETH_TRANSCEIVER_TYPE_10G_LR:
2248        case ETH_TRANSCEIVER_TYPE_10G_LRM:
2249        case ETH_TRANSCEIVER_TYPE_10G_ER:
2250        case ETH_TRANSCEIVER_TYPE_10G_PCC:
2251        case ETH_TRANSCEIVER_TYPE_10G_ACC:
2252        case ETH_TRANSCEIVER_TYPE_4x10G:
2253                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2254                break;
2255        case ETH_TRANSCEIVER_TYPE_40G_LR4:
2256        case ETH_TRANSCEIVER_TYPE_40G_SR4:
2257        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2258        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2259                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2260                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2261                break;
2262        case ETH_TRANSCEIVER_TYPE_100G_AOC:
2263        case ETH_TRANSCEIVER_TYPE_100G_SR4:
2264        case ETH_TRANSCEIVER_TYPE_100G_LR4:
2265        case ETH_TRANSCEIVER_TYPE_100G_ER4:
2266        case ETH_TRANSCEIVER_TYPE_100G_ACC:
2267                *p_speed_mask =
2268                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2269                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2270                break;
2271        case ETH_TRANSCEIVER_TYPE_25G_SR:
2272        case ETH_TRANSCEIVER_TYPE_25G_LR:
2273        case ETH_TRANSCEIVER_TYPE_25G_AOC:
2274        case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2275        case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2276        case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2277                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2278                break;
2279        case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2280        case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2281        case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2282        case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2283                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2284                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2285                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2286                break;
2287        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
2288        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
2289                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2290                                NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2291                break;
2292        case ETH_TRANSCEIVER_TYPE_40G_CR4:
2293        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2294                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2295                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2296                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2297                break;
2298        case ETH_TRANSCEIVER_TYPE_100G_CR4:
2299        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2300                *p_speed_mask =
2301                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2302                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2303                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2304                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2305                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2306                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2307                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2308                break;
2309        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2310        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2311        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2312                *p_speed_mask =
2313                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2314                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2315                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2316                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2317                break;
2318        case ETH_TRANSCEIVER_TYPE_XLPPI:
2319                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2320                break;
2321        case ETH_TRANSCEIVER_TYPE_10G_BASET:
2322        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
2323        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
2324                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2325                                NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2326                break;
2327        default:
2328                DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2329                        transceiver_type);
2330                *p_speed_mask = 0xff;
2331                break;
2332        }
2333
2334        return 0;
2335}
2336
2337int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
2338                             struct qed_ptt *p_ptt, u32 *p_board_config)
2339{
2340        u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2341
2342        if (IS_VF(p_hwfn->cdev))
2343                return -EINVAL;
2344
2345        if (!qed_mcp_is_init(p_hwfn)) {
2346                DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2347                return -EBUSY;
2348        }
2349        if (!p_ptt) {
2350                *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2351                return -EINVAL;
2352        }
2353
2354        nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2355        nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2356        port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2357                        offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2358        *p_board_config = qed_rd(p_hwfn, p_ptt,
2359                                 port_cfg_addr +
2360                                 offsetof(struct nvm_cfg1_port,
2361                                          board_cfg));
2362
2363        return 0;
2364}
2365
2366/* Old MFW has a global configuration for all PFs regarding RDMA support */
2367static void
2368qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
2369                               enum qed_pci_personality *p_proto)
2370{
2371        /* No legacy MFW ever published iWARP support, so at this point this
2372         * is either plain L2 or RoCE.
2373         */
2374        if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
2375                *p_proto = QED_PCI_ETH_ROCE;
2376        else
2377                *p_proto = QED_PCI_ETH;
2378
2379        DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2380                   "According to Legacy capabilities, L2 personality is %08x\n",
2381                   (u32) *p_proto);
2382}
2383
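/* Query the MFW for this PF's RDMA personality; fails when the MFW lacks
 * the GET_PF_RDMA_PROTOCOL command.
 */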
2384static int
2385qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
2386                            struct qed_ptt *p_ptt,
2387                            enum qed_pci_personality *p_proto)
2388{
2389        u32 resp = 0, param = 0;
2390        int rc;
2391
2392        rc = qed_mcp_cmd(p_hwfn, p_ptt,
2393                         DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2394        if (rc)
2395                return rc;
2396        if (resp != FW_MSG_CODE_OK) {
2397                DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2398                           "MFW lacks support for command; Returns %08x\n",
2399                           resp);
2400                return -EINVAL;
2401        }
2402
2403        switch (param) {
2404        case FW_MB_PARAM_GET_PF_RDMA_NONE:
2405                *p_proto = QED_PCI_ETH;
2406                break;
2407        case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2408                *p_proto = QED_PCI_ETH_ROCE;
2409                break;
2410        case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2411                *p_proto = QED_PCI_ETH_IWARP;
2412                break;
2413        case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2414                *p_proto = QED_PCI_ETH_RDMA;
2415                break;
2416        default:
2417                DP_NOTICE(p_hwfn,
2418                          "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2419                          param);
2420                return -EINVAL;
2421        }
2422
2423        DP_VERBOSE(p_hwfn,
2424                   NETIF_MSG_IFUP,
2425                   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2426                   (u32) *p_proto, resp, param);
2427        return 0;
2428}
2429
2430static int
2431qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
2432                        struct public_func *p_info,
2433                        struct qed_ptt *p_ptt,
2434                        enum qed_pci_personality *p_proto)
2435{
2436        int rc = 0;
2437
2438        switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2439        case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2440                if (!IS_ENABLED(CONFIG_QED_RDMA))
2441                        *p_proto = QED_PCI_ETH;
2442                else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
2443                        qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2444                break;
2445        case FUNC_MF_CFG_PROTOCOL_ISCSI:
2446                *p_proto = QED_PCI_ISCSI;
2447                break;
2448        case FUNC_MF_CFG_PROTOCOL_NVMETCP:
2449                *p_proto = QED_PCI_NVMETCP;
2450                break;
2451        case FUNC_MF_CFG_PROTOCOL_FCOE:
2452                *p_proto = QED_PCI_FCOE;
2453                break;
2454        case FUNC_MF_CFG_PROTOCOL_ROCE:
2455                DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
2456                fallthrough;
2457        default:
2458                rc = -EINVAL;
2459        }
2460
2461        return rc;
2462}
2463
2464int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
2465                                 struct qed_ptt *p_ptt)
2466{
2467        struct qed_mcp_function_info *info;
2468        struct public_func shmem_info;
2469
2470        qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2471        info = &p_hwfn->mcp_info->func_info;
2472
2473        info->pause_on_host = (shmem_info.config &
2474                               FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2475
2476        if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2477                                    &info->protocol)) {
2478                DP_ERR(p_hwfn, "Unknown personality %08x\n",
2479                       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2480                return -EINVAL;
2481        }
2482
2483        qed_read_pf_bandwidth(p_hwfn, &shmem_info);
2484
2485        if (shmem_info.mac_upper || shmem_info.mac_lower) {
2486                info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2487                info->mac[1] = (u8)(shmem_info.mac_upper);
2488                info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2489                info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2490                info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2491                info->mac[5] = (u8)(shmem_info.mac_lower);
2492
2493                /* Store primary MAC for later possible WoL */
2494                memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
2495        } else {
2496                DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
2497        }
2498
2499        info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2500                         (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2501        info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2502                         (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2503
2504        info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2505
2506        info->mtu = (u16)shmem_info.mtu_size;
2507
2508        p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
2509        p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
2510        if (qed_mcp_is_init(p_hwfn)) {
2511                u32 resp = 0, param = 0;
2512                int rc;
2513
2514                rc = qed_mcp_cmd(p_hwfn, p_ptt,
2515                                 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2516                if (rc)
2517                        return rc;
2518                if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2519                        p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
2520        }
2521
2522        DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
2523                   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n",
2524                   info->pause_on_host, info->protocol,
2525                   info->bandwidth_min, info->bandwidth_max,
2526                   info->mac,
2527                   info->wwn_port, info->wwn_node,
2528                   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
2529
2530        return 0;
2531}
2532
2533struct qed_mcp_link_params
2534*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
2535{
2536        if (!p_hwfn || !p_hwfn->mcp_info)
2537                return NULL;
2538        return &p_hwfn->mcp_info->link_input;
2539}
2540
2541struct qed_mcp_link_state
2542*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
2543{
2544        if (!p_hwfn || !p_hwfn->mcp_info)
2545                return NULL;
2546        return &p_hwfn->mcp_info->link_output;
2547}
2548
2549struct qed_mcp_link_capabilities
2550*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
2551{
2552        if (!p_hwfn || !p_hwfn->mcp_info)
2553                return NULL;
2554        return &p_hwfn->mcp_info->link_capabilities;
2555}
2556
2557int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2558{
2559        u32 resp = 0, param = 0;
2560        int rc;
2561
2562        rc = qed_mcp_cmd(p_hwfn, p_ptt,
2563                         DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2564
2565        /* Wait for the drain to complete before returning */
2566        msleep(1020);
2567
2568        return rc;
2569}
2570
2571int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
2572                           struct qed_ptt *p_ptt, u32 *p_flash_size)
2573{
2574        u32 flash_size;
2575
2576        if (IS_VF(p_hwfn->cdev))
2577                return -EINVAL;
2578
2579        flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2580        flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2581                      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2582        flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2583
2584        *p_flash_size = flash_size;
2585
2586        return 0;
2587}
2588
2589int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2590{
2591        struct qed_dev *cdev = p_hwfn->cdev;
2592
2593        if (cdev->recov_in_prog) {
2594                DP_NOTICE(p_hwfn,
2595                          "Avoid triggering a recovery since such a process is already in progress\n");
2596                return -EAGAIN;
2597        }
2598
2599        DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
2600        qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2601
2602        return 0;
2603}
2604
2605#define QED_RECOVERY_PROLOG_SLEEP_MS    100
2606
2607int qed_recovery_prolog(struct qed_dev *cdev)
2608{
2609        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2610        struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
2611        int rc;
2612
2613        /* Allow ongoing PCIe transactions to complete */
2614        msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
2615
2616        /* Clear the PF's internal FID_enable in the PXP */
2617        rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
2618        if (rc)
2619                DP_NOTICE(p_hwfn,
2620                          "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
2621                          rc);
2622
2623        return rc;
2624}
2625
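/* On BB, only the leading hwfn may configure VF MSI-X, and the SB count is
 * scaled by the number of hwfns to account for CMT.
 */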
2626static int
2627qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
2628                          struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2629{
2630        u32 resp = 0, param = 0, rc_param = 0;
2631        int rc;
2632
2633        /* Only the leader can configure MSI-X; CMT must be taken into account */
2634        if (!IS_LEAD_HWFN(p_hwfn))
2635                return 0;
2636        num *= p_hwfn->cdev->num_hwfns;
2637
2638        param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2639                 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2640        param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2641                 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2642
2643        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2644                         &resp, &rc_param);
2645
2646        if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2647                DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
2648                rc = -EINVAL;
2649        } else {
2650                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2651                           "Requested 0x%02x MSI-X interrupts for VF 0x%02x\n",
2652                           num, vf_id);
2653        }
2654
2655        return rc;
2656}
2657
2658static int
2659qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
2660                          struct qed_ptt *p_ptt, u8 num)
2661{
2662        u32 resp = 0, param = num, rc_param = 0;
2663        int rc;
2664
2665        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2666                         param, &resp, &rc_param);
2667
2668        if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2669                DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
2670                rc = -EINVAL;
2671        } else {
2672                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2673                           "Requested 0x%02x MSI-X interrupts for VFs\n", num);
2674        }
2675
2676        return rc;
2677}
2678
2679int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
2680                           struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2681{
2682        if (QED_IS_BB(p_hwfn->cdev))
2683                return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2684        else
2685                return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2686}
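
/* Illustrative only: a hypothetical caller requesting four MSI-X vectors
 * on behalf of VF 0.  On BB devices the helper above scales the request
 * by the number of hwfns, so with CMT the MFW sees the device-wide total.
 */
static int example_cfg_vf_msix(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        return qed_mcp_config_vf_msix(p_hwfn, p_ptt, 0 /* vf_id */, 4 /* num */);
}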
2687
2688int
2689qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
2690                         struct qed_ptt *p_ptt,
2691                         struct qed_mcp_drv_version *p_ver)
2692{
2693        struct qed_mcp_mb_params mb_params;
2694        struct drv_version_stc drv_version;
2695        __be32 val;
2696        u32 i;
2697        int rc;
2698
2699        memset(&drv_version, 0, sizeof(drv_version));
2700        drv_version.version = p_ver->version;
2701        for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
2702                val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
2703                *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
2704        }
2705
2706        memset(&mb_params, 0, sizeof(mb_params));
2707        mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2708        mb_params.p_data_src = &drv_version;
2709        mb_params.data_src_size = sizeof(drv_version);
2710        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2711        if (rc)
2712                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2713
2714        return rc;
2715}
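
/* Illustrative only: how a caller might fill struct qed_mcp_drv_version
 * before handing it to qed_mcp_send_drv_version().  The version encoding
 * (major/minor packed into a u32) and the name string are assumptions
 * made up for this sketch, not the driver's real scheme.
 */
static int example_send_drv_version(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt)
{
        static const char name[] = "qed-example";
        struct qed_mcp_drv_version drv_version = {};

        drv_version.version = (8 << 24) | (37 << 16);   /* hypothetical 8.37 */
        memcpy(drv_version.name, name, sizeof(name));   /* fits in the field */

        return qed_mcp_send_drv_version(p_hwfn, p_ptt, &drv_version);
}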
2716
2717/* Wait a maximum of 100 msec for the MCP to halt */
2718#define QED_MCP_HALT_SLEEP_MS           10
2719#define QED_MCP_HALT_MAX_RETRIES        10
2720
2721int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2722{
2723        u32 resp = 0, param = 0, cpu_state, cnt = 0;
2724        int rc;
2725
2726        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2727                         &param);
2728        if (rc) {
2729                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2730                return rc;
2731        }
2732
2733        do {
2734                msleep(QED_MCP_HALT_SLEEP_MS);
2735                cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2736                if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2737                        break;
2738        } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
2739
2740        if (cnt == QED_MCP_HALT_MAX_RETRIES) {
2741                DP_NOTICE(p_hwfn,
2742                          "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2743                          qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2744                return -EBUSY;
2745        }
2746
2747        qed_mcp_cmd_set_blocking(p_hwfn, true);
2748
2749        return 0;
2750}
2751
2752#define QED_MCP_RESUME_SLEEP_MS 10
2753
2754int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2755{
2756        u32 cpu_mode, cpu_state;
2757
2758        qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2759
2760        cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2761        cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2762        qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2763        msleep(QED_MCP_RESUME_SLEEP_MS);
2764        cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2765
2766        if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2767                DP_NOTICE(p_hwfn,
2768                          "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2769                          cpu_mode, cpu_state);
2770                return -EBUSY;
2771        }
2772
2773        qed_mcp_cmd_set_blocking(p_hwfn, false);
2774
2775        return 0;
2776}
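
/* Minimal sketch of the intended pairing of the two helpers above: halt
 * the MCP, perform work that requires it to be quiesced (purely a
 * placeholder here), then resume it.  While halted, the mailbox stays in
 * the blocked state set by qed_mcp_cmd_set_blocking().
 */
static int example_with_mcp_halted(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt)
{
        int rc;

        rc = qed_mcp_halt(p_hwfn, p_ptt);
        if (rc)
                return rc;

        /* ... access resources that require a halted MCP ... */

        return qed_mcp_resume(p_hwfn, p_ptt);
}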
2777
2778int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
2779                                     struct qed_ptt *p_ptt,
2780                                     enum qed_ov_client client)
2781{
2782        u32 resp = 0, param = 0;
2783        u32 drv_mb_param;
2784        int rc;
2785
2786        switch (client) {
2787        case QED_OV_CLIENT_DRV:
2788                drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2789                break;
2790        case QED_OV_CLIENT_USER:
2791                drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2792                break;
2793        case QED_OV_CLIENT_VENDOR_SPEC:
2794                drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2795                break;
2796        default:
2797                DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
2798                return -EINVAL;
2799        }
2800
2801        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2802                         drv_mb_param, &resp, &param);
2803        if (rc)
2804                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2805
2806        return rc;
2807}
2808
2809int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
2810                                   struct qed_ptt *p_ptt,
2811                                   enum qed_ov_driver_state drv_state)
2812{
2813        u32 resp = 0, param = 0;
2814        u32 drv_mb_param;
2815        int rc;
2816
2817        switch (drv_state) {
2818        case QED_OV_DRIVER_STATE_NOT_LOADED:
2819                drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2820                break;
2821        case QED_OV_DRIVER_STATE_DISABLED:
2822                drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2823                break;
2824        case QED_OV_DRIVER_STATE_ACTIVE:
2825                drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2826                break;
2827        default:
2828                DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
2829                return -EINVAL;
2830        }
2831
2832        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2833                         drv_mb_param, &resp, &param);
2834        if (rc)
2835                DP_ERR(p_hwfn, "Failed to send driver state\n");
2836
2837        return rc;
2838}
2839
2840int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
2841                          struct qed_ptt *p_ptt, u16 mtu)
2842{
2843        u32 resp = 0, param = 0;
2844        u32 drv_mb_param;
2845        int rc;
2846
2847        drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2848        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2849                         drv_mb_param, &resp, &param);
2850        if (rc)
2851                DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2852
2853        return rc;
2854}
2855
2856int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
2857                          struct qed_ptt *p_ptt, u8 *mac)
2858{
2859        struct qed_mcp_mb_params mb_params;
2860        u32 mfw_mac[2];
2861        int rc;
2862
2863        memset(&mb_params, 0, sizeof(mb_params));
2864        mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2865        mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2866                          DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2867        mb_params.param |= MCP_PF_ID(p_hwfn);
2868
2869        /* The MCP is BE, and on LE platforms PCI would swap accesses to
2870         * SHMEM at 32-bit granularity.
2871         * So the MAC has to be set in native order [and not byte order];
2872         * otherwise it would be read incorrectly by the MFW after the swap.
2873         */
2874        mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2875        mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2876
2877        mb_params.p_data_src = (u8 *)mfw_mac;
2878        mb_params.data_src_size = 8;
2879        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2880        if (rc)
2881                DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2882
2883        /* Store primary MAC for later possible WoL */
2884        memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
2885
2886        return rc;
2887}
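
/* Worked example of the packing above: for MAC 00:11:22:33:44:55 the
 * words handed to the MFW are mfw_mac[0] = 0x00112233 and
 * mfw_mac[1] = 0x44550000, regardless of host endianness.
 */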
2888
2889int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
2890                          struct qed_ptt *p_ptt, enum qed_ov_wol wol)
2891{
2892        u32 resp = 0, param = 0;
2893        u32 drv_mb_param;
2894        int rc;
2895
2896        if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
2897                DP_VERBOSE(p_hwfn, QED_MSG_SP,
2898                           "Can't change WoL configuration when WoL isn't supported\n");
2899                return -EINVAL;
2900        }
2901
2902        switch (wol) {
2903        case QED_OV_WOL_DEFAULT:
2904                drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2905                break;
2906        case QED_OV_WOL_DISABLED:
2907                drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2908                break;
2909        case QED_OV_WOL_ENABLED:
2910                drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2911                break;
2912        default:
2913                DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2914                return -EINVAL;
2915        }
2916
2917        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2918                         drv_mb_param, &resp, &param);
2919        if (rc)
2920                DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2921
2922        /* Store the WoL update for a future unload */
2923        p_hwfn->cdev->wol_config = (u8)wol;
2924
2925        return rc;
2926}
2927
2928int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
2929                              struct qed_ptt *p_ptt,
2930                              enum qed_ov_eswitch eswitch)
2931{
2932        u32 resp = 0, param = 0;
2933        u32 drv_mb_param;
2934        int rc;
2935
2936        switch (eswitch) {
2937        case QED_OV_ESWITCH_NONE:
2938                drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2939                break;
2940        case QED_OV_ESWITCH_VEB:
2941                drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2942                break;
2943        case QED_OV_ESWITCH_VEPA:
2944                drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2945                break;
2946        default:
2947                DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2948                return -EINVAL;
2949        }
2950
2951        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2952                         drv_mb_param, &resp, &param);
2953        if (rc)
2954                DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2955
2956        return rc;
2957}
2958
2959int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
2960                    struct qed_ptt *p_ptt, enum qed_led_mode mode)
2961{
2962        u32 resp = 0, param = 0, drv_mb_param;
2963        int rc;
2964
2965        switch (mode) {
2966        case QED_LED_MODE_ON:
2967                drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2968                break;
2969        case QED_LED_MODE_OFF:
2970                drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2971                break;
2972        case QED_LED_MODE_RESTORE:
2973                drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2974                break;
2975        default:
2976                DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
2977                return -EINVAL;
2978        }
2979
2980        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2981                         drv_mb_param, &resp, &param);
2982
2983        return rc;
2984}
2985
2986int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
2987                          struct qed_ptt *p_ptt, u32 mask_parities)
2988{
2989        u32 resp = 0, param = 0;
2990        int rc;
2991
2992        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2993                         mask_parities, &resp, &param);
2994
2995        if (rc) {
2996                DP_ERR(p_hwfn,
2997                       "MCP response failure for mask parities, aborting\n");
2998        } else if (resp != FW_MSG_CODE_OK) {
2999                DP_ERR(p_hwfn,
3000                       "MCP did not acknowledge mask parity request. Old MFW?\n");
3001                rc = -EINVAL;
3002        }
3003
3004        return rc;
3005}
3006
3007int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
3008{
3009        u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
3010        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3011        u32 resp = 0, resp_param = 0;
3012        struct qed_ptt *p_ptt;
3013        int rc = 0;
3014
3015        p_ptt = qed_ptt_acquire(p_hwfn);
3016        if (!p_ptt)
3017                return -EBUSY;
3018
3019        while (bytes_left > 0) {
3020                bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
3021
3022                rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3023                                        DRV_MSG_CODE_NVM_READ_NVRAM,
3024                                        addr + offset +
3025                                        (bytes_to_copy <<
3026                                         DRV_MB_PARAM_NVM_LEN_OFFSET),
3027                                        &resp, &resp_param,
3028                                        &read_len,
3029                                        (u32 *)(p_buf + offset));
3030
3031                if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
3032                        DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
3033                        break;
3034                }
3035
3036                /* This can be a lengthy process, and it's possible the scheduler
3037                 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3038                 */
3039                if (bytes_left % 0x1000 <
3040                    (bytes_left - read_len) % 0x1000)
3041                        usleep_range(1000, 2000);
3042
3043                offset += read_len;
3044                bytes_left -= read_len;
3045        }
3046
3047        cdev->mcp_nvm_resp = resp;
3048        qed_ptt_release(p_hwfn, p_ptt);
3049
3050        return rc;
3051}
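
/* Illustrative only: reading 'len' bytes of NVM starting at 'addr' into
 * a caller-allocated buffer.  The address/length values a real caller
 * would use depend on the flash layout and are not shown here.
 */
static int example_nvm_read(struct qed_dev *cdev, u32 addr, u32 len)
{
        u8 *buf;
        int rc;

        buf = kzalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        rc = qed_mcp_nvm_read(cdev, addr, buf, len);

        kfree(buf);
        return rc;
}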
3052
3053int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
3054{
3055        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3056        struct qed_ptt *p_ptt;
3057
3058        p_ptt = qed_ptt_acquire(p_hwfn);
3059        if (!p_ptt)
3060                return -EBUSY;
3061
3062        memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
3063        qed_ptt_release(p_hwfn, p_ptt);
3064
3065        return 0;
3066}
3067
3068int qed_mcp_nvm_write(struct qed_dev *cdev,
3069                      u32 cmd, u32 addr, u8 *p_buf, u32 len)
3070{
3071        u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
3072        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3073        struct qed_ptt *p_ptt;
3074        int rc = -EINVAL;
3075
3076        p_ptt = qed_ptt_acquire(p_hwfn);
3077        if (!p_ptt)
3078                return -EBUSY;
3079
3080        switch (cmd) {
3081        case QED_PUT_FILE_BEGIN:
3082                nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
3083                break;
3084        case QED_PUT_FILE_DATA:
3085                nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3086                break;
3087        case QED_NVM_WRITE_NVRAM:
3088                nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3089                break;
3090        default:
3091                DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
3092                rc = -EINVAL;
3093                goto out;
3094        }
3095
3096        buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
3097        while (buf_idx < len) {
3098                if (cmd == QED_PUT_FILE_BEGIN)
3099                        nvm_offset = addr;
3100                else
3101                        nvm_offset = ((buf_size <<
3102                                       DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
3103                                       buf_idx;
3104                rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3105                                        &resp, &param, buf_size,
3106                                        (u32 *)&p_buf[buf_idx]);
3107                if (rc) {
3108                        DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
3109                        resp = FW_MSG_CODE_ERROR;
3110                        break;
3111                }
3112
3113                if (resp != FW_MSG_CODE_OK &&
3114                    resp != FW_MSG_CODE_NVM_OK &&
3115                    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3116                        DP_NOTICE(cdev,
3117                                  "nvm write failed, resp = 0x%08x\n", resp);
3118                        rc = -EINVAL;
3119                        break;
3120                }
3121
3122                /* This can be a lengthy process, and it's possible the scheduler
3123                 * isn't preemptible. Sleep a bit to prevent CPU hogging.
3124                 */
3125                if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
3126                        usleep_range(1000, 2000);
3127
3128                /* For an MBI upgrade, the MFW response includes the next
3129                 * buffer offset to be delivered to the MFW.
3130                 */
3131                if (param && cmd == QED_PUT_FILE_DATA) {
3132                        buf_idx = QED_MFW_GET_FIELD(param,
3133                                        FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
3134                        buf_size = QED_MFW_GET_FIELD(param,
3135                                         FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
3136                } else {
3137                        buf_idx += buf_size;
3138                        buf_size = min_t(u32, (len - buf_idx),
3139                                         MCP_DRV_NVM_BUF_LEN);
3140                }
3141        }
3142
3143        cdev->mcp_nvm_resp = resp;
3144out:
3145        qed_ptt_release(p_hwfn, p_ptt);
3146
3147        return rc;
3148}
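
/* Illustrative only: rewriting a region of NVRAM in place.  Whether a
 * given region may be written this way depends on the flash layout and
 * MFW policy; 'addr' and the buffer contents here are placeholders.
 */
static int example_nvm_write(struct qed_dev *cdev, u32 addr,
                             u8 *p_data, u32 len)
{
        return qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, addr,
                                 p_data, len);
}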
3149
3150int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3151                         u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
3152{
3153        u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
3154        u32 resp, param;
3155        int rc;
3156
3157        nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
3158                       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
3159        nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
3160                       DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
3161
3162        addr = offset;
3163        offset = 0;
3164        bytes_left = len;
3165        while (bytes_left > 0) {
3166                bytes_to_copy = min_t(u32, bytes_left,
3167                                      MAX_I2C_TRANSACTION_SIZE);
3168                nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3169                               DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3170                nvm_offset |= ((addr + offset) <<
3171                               DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
3172                               DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
3173                nvm_offset |= (bytes_to_copy <<
3174                               DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
3175                               DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
3176                rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3177                                        DRV_MSG_CODE_TRANSCEIVER_READ,
3178                                        nvm_offset, &resp, &param, &buf_size,
3179                                        (u32 *)(p_buf + offset));
3180                if (rc) {
3181                        DP_NOTICE(p_hwfn,
3182                                  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3183                                  rc);
3184                        return rc;
3185                }
3186
3187                if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3188                        return -ENODEV;
3189                else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3190                        return -EINVAL;
3191
3192                offset += buf_size;
3193                bytes_left -= buf_size;
3194        }
3195
3196        return 0;
3197}
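
/* Illustrative only: dumping the first 96 bytes of an SFP module's base
 * ID page.  Port 0 and I2C address 0xA0 follow the usual SFF-8472
 * convention for the A0h page; they are example values, not derived
 * from this file.
 */
static int example_sfp_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                            u8 *p_buf)
{
        return qed_mcp_phy_sfp_read(p_hwfn, p_ptt, 0 /* port */,
                                    0xa0 /* i2c addr */, 0 /* offset */,
                                    96 /* len */, p_buf);
}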
3198
3199int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3200{
3201        u32 drv_mb_param = 0, rsp, param;
3202        int rc = 0;
3203
3204        drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3205                        DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3206
3207        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3208                         drv_mb_param, &rsp, &param);
3209
3210        if (rc)
3211                return rc;
3212
3213        if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3214            (param != DRV_MB_PARAM_BIST_RC_PASSED))
3215                rc = -EAGAIN;
3216
3217        return rc;
3218}
3219
3220int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3221{
3222        u32 drv_mb_param, rsp, param;
3223        int rc = 0;
3224
3225        drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3226                        DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3227
3228        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3229                         drv_mb_param, &rsp, &param);
3230
3231        if (rc)
3232                return rc;
3233
3234        if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3235            (param != DRV_MB_PARAM_BIST_RC_PASSED))
3236                rc = -EAGAIN;
3237
3238        return rc;
3239}
3240
3241int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
3242                                    struct qed_ptt *p_ptt,
3243                                    u32 *num_images)
3244{
3245        u32 drv_mb_param = 0, rsp;
3246        int rc = 0;
3247
3248        drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3249                        DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3250
3251        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3252                         drv_mb_param, &rsp, num_images);
3253        if (rc)
3254                return rc;
3255
3256        if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
3257                rc = -EINVAL;
3258
3259        return rc;
3260}
3261
3262int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
3263                                   struct qed_ptt *p_ptt,
3264                                   struct bist_nvm_image_att *p_image_att,
3265                                   u32 image_index)
3266{
3267        u32 buf_size = 0, param, resp = 0, resp_param = 0;
3268        int rc;
3269
3270        param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3271                DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
3272        param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
3273
3274        rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3275                                DRV_MSG_CODE_BIST_TEST, param,
3276                                &resp, &resp_param,
3277                                &buf_size,
3278                                (u32 *)p_image_att);
3279        if (rc)
3280                return rc;
3281
3282        if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3283            (p_image_att->return_code != 1))
3284                rc = -EINVAL;
3285
3286        return rc;
3287}
3288
3289int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
3290{
3291        struct qed_nvm_image_info nvm_info;
3292        struct qed_ptt *p_ptt;
3293        int rc;
3294        u32 i;
3295
3296        if (p_hwfn->nvm_info.valid)
3297                return 0;
3298
3299        p_ptt = qed_ptt_acquire(p_hwfn);
3300        if (!p_ptt) {
3301                DP_ERR(p_hwfn, "failed to acquire ptt\n");
3302                return -EBUSY;
3303        }
3304
3305        /* Acquire from MFW the amount of available images */
3306        nvm_info.num_images = 0;
3307        rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
3308                                             p_ptt, &nvm_info.num_images);
3309        if (rc == -EOPNOTSUPP) {
3310                DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
3311                goto out;
3312        } else if (rc || !nvm_info.num_images) {
3313                DP_ERR(p_hwfn, "Failed getting number of images\n");
3314                goto err0;
3315        }
3316
3317        nvm_info.image_att = kmalloc_array(nvm_info.num_images,
3318                                           sizeof(struct bist_nvm_image_att),
3319                                           GFP_KERNEL);
3320        if (!nvm_info.image_att) {
3321                rc = -ENOMEM;
3322                goto err0;
3323        }
3324
3325        /* Iterate over images and get their attributes */
3326        for (i = 0; i < nvm_info.num_images; i++) {
3327                rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
3328                                                    &nvm_info.image_att[i], i);
3329                if (rc) {
3330                        DP_ERR(p_hwfn,
3331                               "Failed getting image index %d attributes\n", i);
3332                        goto err1;
3333                }
3334
3335                DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
3336                           nvm_info.image_att[i].len);
3337        }
3338out:
3339        /* Update hwfn's nvm_info */
3340        if (nvm_info.num_images) {
3341                p_hwfn->nvm_info.num_images = nvm_info.num_images;
3342                kfree(p_hwfn->nvm_info.image_att);
3343                p_hwfn->nvm_info.image_att = nvm_info.image_att;
3344                p_hwfn->nvm_info.valid = true;
3345        }
3346
3347        qed_ptt_release(p_hwfn, p_ptt);
3348        return 0;
3349
3350err1:
3351        kfree(nvm_info.image_att);
3352err0:
3353        qed_ptt_release(p_hwfn, p_ptt);
3354        return rc;
3355}
3356
3357void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
3358{
3359        kfree(p_hwfn->nvm_info.image_att);
3360        p_hwfn->nvm_info.image_att = NULL;
3361        p_hwfn->nvm_info.valid = false;
3362}
3363
3364int
3365qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3366                          enum qed_nvm_images image_id,
3367                          struct qed_nvm_image_att *p_image_att)
3368{
3369        enum nvm_image_type type;
3370        int rc;
3371        u32 i;
3372
3373        /* Translate image_id into MFW definitions */
3374        switch (image_id) {
3375        case QED_NVM_IMAGE_ISCSI_CFG:
3376                type = NVM_TYPE_ISCSI_CFG;
3377                break;
3378        case QED_NVM_IMAGE_FCOE_CFG:
3379                type = NVM_TYPE_FCOE_CFG;
3380                break;
3381        case QED_NVM_IMAGE_MDUMP:
3382                type = NVM_TYPE_MDUMP;
3383                break;
3384        case QED_NVM_IMAGE_NVM_CFG1:
3385                type = NVM_TYPE_NVM_CFG1;
3386                break;
3387        case QED_NVM_IMAGE_DEFAULT_CFG:
3388                type = NVM_TYPE_DEFAULT_CFG;
3389                break;
3390        case QED_NVM_IMAGE_NVM_META:
3391                type = NVM_TYPE_META;
3392                break;
3393        default:
3394                DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
3395                          image_id);
3396                return -EINVAL;
3397        }
3398
3399        rc = qed_mcp_nvm_info_populate(p_hwfn);
3400        if (rc)
3401                return rc;
3402
3403        for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3404                if (type == p_hwfn->nvm_info.image_att[i].image_type)
3405                        break;
3406        if (i == p_hwfn->nvm_info.num_images) {
3407                DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3408                           "Failed to find nvram image of type %08x\n",
3409                           image_id);
3410                return -ENOENT;
3411        }
3412
3413        p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
3414        p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
3415
3416        return 0;
3417}
3418
3419int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
3420                          enum qed_nvm_images image_id,
3421                          u8 *p_buffer, u32 buffer_len)
3422{
3423        struct qed_nvm_image_att image_att;
3424        int rc;
3425
3426        memset(p_buffer, 0, buffer_len);
3427
3428        rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
3429        if (rc)
3430                return rc;
3431
3432        /* Validate sizes - both the image's and the supplied buffer's */
3433        if (image_att.length <= 4) {
3434                DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3435                           "Image [%d] is too small - only %d bytes\n",
3436                           image_id, image_att.length);
3437                return -EINVAL;
3438        }
3439
3440        if (image_att.length > buffer_len) {
3441                DP_VERBOSE(p_hwfn,
3442                           QED_MSG_STORAGE,
3443                           "Image [%d] is too big - %08x bytes where only %08x are available\n",
3444                           image_id, image_att.length, buffer_len);
3445                return -ENOMEM;
3446        }
3447
3448        return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
3449                                p_buffer, image_att.length);
3450}
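
/* Illustrative only: fetching a whole NVM image by first querying its
 * attributes to size the buffer.  Error handling is condensed.
 */
static int example_read_image(struct qed_hwfn *p_hwfn,
                              enum qed_nvm_images image_id)
{
        struct qed_nvm_image_att image_att;
        u8 *buf;
        int rc;

        rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
        if (rc)
                return rc;

        buf = kzalloc(image_att.length, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buf, image_att.length);

        kfree(buf);
        return rc;
}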
3451
3452static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
3453{
3454        enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3455
3456        switch (res_id) {
3457        case QED_SB:
3458                mfw_res_id = RESOURCE_NUM_SB_E;
3459                break;
3460        case QED_L2_QUEUE:
3461                mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3462                break;
3463        case QED_VPORT:
3464                mfw_res_id = RESOURCE_NUM_VPORT_E;
3465                break;
3466        case QED_RSS_ENG:
3467                mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3468                break;
3469        case QED_PQ:
3470                mfw_res_id = RESOURCE_NUM_PQ_E;
3471                break;
3472        case QED_RL:
3473                mfw_res_id = RESOURCE_NUM_RL_E;
3474                break;
3475        case QED_MAC:
3476        case QED_VLAN:
3477                /* Each VFC resource can accommodate both a MAC and a VLAN */
3478                mfw_res_id = RESOURCE_VFC_FILTER_E;
3479                break;
3480        case QED_ILT:
3481                mfw_res_id = RESOURCE_ILT_E;
3482                break;
3483        case QED_LL2_RAM_QUEUE:
3484                mfw_res_id = RESOURCE_LL2_QUEUE_E;
3485                break;
3486        case QED_LL2_CTX_QUEUE:
3487                mfw_res_id = RESOURCE_LL2_CQS_E;
3488                break;
3489        case QED_RDMA_CNQ_RAM:
3490        case QED_CMDQS_CQS:
3491                /* CNQ/CMDQS are the same resource */
3492                mfw_res_id = RESOURCE_CQS_E;
3493                break;
3494        case QED_RDMA_STATS_QUEUE:
3495                mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3496                break;
3497        case QED_BDQ:
3498                mfw_res_id = RESOURCE_BDQ_E;
3499                break;
3500        default:
3501                break;
3502        }
3503
3504        return mfw_res_id;
3505}
3506
3507#define QED_RESC_ALLOC_VERSION_MAJOR    2
3508#define QED_RESC_ALLOC_VERSION_MINOR    0
3509#define QED_RESC_ALLOC_VERSION                               \
3510        ((QED_RESC_ALLOC_VERSION_MAJOR <<                    \
3511          DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3512         (QED_RESC_ALLOC_VERSION_MINOR <<                    \
3513          DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3514
3515struct qed_resc_alloc_in_params {
3516        u32 cmd;
3517        enum qed_resources res_id;
3518        u32 resc_max_val;
3519};
3520
3521struct qed_resc_alloc_out_params {
3522        u32 mcp_resp;
3523        u32 mcp_param;
3524        u32 resc_num;
3525        u32 resc_start;
3526        u32 vf_resc_num;
3527        u32 vf_resc_start;
3528        u32 flags;
3529};
3530
3531static int
3532qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
3533                            struct qed_ptt *p_ptt,
3534                            struct qed_resc_alloc_in_params *p_in_params,
3535                            struct qed_resc_alloc_out_params *p_out_params)
3536{
3537        struct qed_mcp_mb_params mb_params;
3538        struct resource_info mfw_resc_info;
3539        int rc;
3540
3541        memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
3542
3543        mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
3544        if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3545                DP_ERR(p_hwfn,
3546                       "Failed to match resource %d [%s] with the MFW resources\n",
3547                       p_in_params->res_id,
3548                       qed_hw_get_resc_name(p_in_params->res_id));
3549                return -EINVAL;
3550        }
3551
3552        switch (p_in_params->cmd) {
3553        case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3554                mfw_resc_info.size = p_in_params->resc_max_val;
3555                fallthrough;
3556        case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3557                break;
3558        default:
3559                DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3560                       p_in_params->cmd);
3561                return -EINVAL;
3562        }
3563
3564        memset(&mb_params, 0, sizeof(mb_params));
3565        mb_params.cmd = p_in_params->cmd;
3566        mb_params.param = QED_RESC_ALLOC_VERSION;
3567        mb_params.p_data_src = &mfw_resc_info;
3568        mb_params.data_src_size = sizeof(mfw_resc_info);
3569        mb_params.p_data_dst = mb_params.p_data_src;
3570        mb_params.data_dst_size = mb_params.data_src_size;
3571
3572        DP_VERBOSE(p_hwfn,
3573                   QED_MSG_SP,
3574                   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3575                   p_in_params->cmd,
3576                   p_in_params->res_id,
3577                   qed_hw_get_resc_name(p_in_params->res_id),
3578                   QED_MFW_GET_FIELD(mb_params.param,
3579                                     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3580                   QED_MFW_GET_FIELD(mb_params.param,
3581                                     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3582                   p_in_params->resc_max_val);
3583
3584        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3585        if (rc)
3586                return rc;
3587
3588        p_out_params->mcp_resp = mb_params.mcp_resp;
3589        p_out_params->mcp_param = mb_params.mcp_param;
3590        p_out_params->resc_num = mfw_resc_info.size;
3591        p_out_params->resc_start = mfw_resc_info.offset;
3592        p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3593        p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3594        p_out_params->flags = mfw_resc_info.flags;
3595
3596        DP_VERBOSE(p_hwfn,
3597                   QED_MSG_SP,
3598                   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3599                   QED_MFW_GET_FIELD(p_out_params->mcp_param,
3600                                     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3601                   QED_MFW_GET_FIELD(p_out_params->mcp_param,
3602                                     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3603                   p_out_params->resc_num,
3604                   p_out_params->resc_start,
3605                   p_out_params->vf_resc_num,
3606                   p_out_params->vf_resc_start, p_out_params->flags);
3607
3608        return 0;
3609}
3610
3611int
3612qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
3613                         struct qed_ptt *p_ptt,
3614                         enum qed_resources res_id,
3615                         u32 resc_max_val, u32 *p_mcp_resp)
3616{
3617        struct qed_resc_alloc_out_params out_params;
3618        struct qed_resc_alloc_in_params in_params;
3619        int rc;
3620
3621        memset(&in_params, 0, sizeof(in_params));
3622        in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3623        in_params.res_id = res_id;
3624        in_params.resc_max_val = resc_max_val;
3625        memset(&out_params, 0, sizeof(out_params));
3626        rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3627                                         &out_params);
3628        if (rc)
3629                return rc;
3630
3631        *p_mcp_resp = out_params.mcp_resp;
3632
3633        return 0;
3634}
3635
3636int
3637qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
3638                      struct qed_ptt *p_ptt,
3639                      enum qed_resources res_id,
3640                      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
3641{
3642        struct qed_resc_alloc_out_params out_params;
3643        struct qed_resc_alloc_in_params in_params;
3644        int rc;
3645
3646        memset(&in_params, 0, sizeof(in_params));
3647        in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3648        in_params.res_id = res_id;
3649        memset(&out_params, 0, sizeof(out_params));
3650        rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3651                                         &out_params);
3652        if (rc)
3653                return rc;
3654
3655        *p_mcp_resp = out_params.mcp_resp;
3656
3657        if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3658                *p_resc_num = out_params.resc_num;
3659                *p_resc_start = out_params.resc_start;
3660        }
3661
3662        return 0;
3663}
3664
3665int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3666{
3667        u32 mcp_resp, mcp_param;
3668
3669        return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3670                           &mcp_resp, &mcp_param);
3671}
3672
3673static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
3674                                struct qed_ptt *p_ptt,
3675                                u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
3676{
3677        int rc;
3678
3679        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3680                         p_mcp_resp, p_mcp_param);
3681        if (rc)
3682                return rc;
3683
3684        if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3685                DP_INFO(p_hwfn,
3686                        "The resource command is unsupported by the MFW\n");
3687                return -EINVAL;
3688        }
3689
3690        if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3691                u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3692
3693                DP_NOTICE(p_hwfn,
3694                          "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3695                          param, opcode);
3696                return -EINVAL;
3697        }
3698
3699        return rc;
3700}
3701
3702static int
3703__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3704                    struct qed_ptt *p_ptt,
3705                    struct qed_resc_lock_params *p_params)
3706{
3707        u32 param = 0, mcp_resp, mcp_param;
3708        u8 opcode;
3709        int rc;
3710
3711        switch (p_params->timeout) {
3712        case QED_MCP_RESC_LOCK_TO_DEFAULT:
3713                opcode = RESOURCE_OPCODE_REQ;
3714                p_params->timeout = 0;
3715                break;
3716        case QED_MCP_RESC_LOCK_TO_NONE:
3717                opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3718                p_params->timeout = 0;
3719                break;
3720        default:
3721                opcode = RESOURCE_OPCODE_REQ_W_AGING;
3722                break;
3723        }
3724
3725        QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3726        QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3727        QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3728
3729        DP_VERBOSE(p_hwfn,
3730                   QED_MSG_SP,
3731                   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3732                   param, p_params->timeout, opcode, p_params->resource);
3733
3734        /* Attempt to acquire the resource */
3735        rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3736        if (rc)
3737                return rc;
3738
3739        /* Analyze the response */
3740        p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3741        opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3742
3743        DP_VERBOSE(p_hwfn,
3744                   QED_MSG_SP,
3745                   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3746                   mcp_param, opcode, p_params->owner);
3747
3748        switch (opcode) {
3749        case RESOURCE_OPCODE_GNT:
3750                p_params->b_granted = true;
3751                break;
3752        case RESOURCE_OPCODE_BUSY:
3753                p_params->b_granted = false;
3754                break;
3755        default:
3756                DP_NOTICE(p_hwfn,
3757                          "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3758                          mcp_param, opcode);
3759                return -EINVAL;
3760        }
3761
3762        return 0;
3763}
3764
3765int
3766qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3767                  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
3768{
3769        u32 retry_cnt = 0;
3770        int rc;
3771
3772        do {
3773                /* No need for an interval before the first iteration */
3774                if (retry_cnt) {
3775                        if (p_params->sleep_b4_retry) {
3776                                u16 retry_interval_in_ms =
3777                                    DIV_ROUND_UP(p_params->retry_interval,
3778                                                 1000);
3779
3780                                msleep(retry_interval_in_ms);
3781                        } else {
3782                                udelay(p_params->retry_interval);
3783                        }
3784                }
3785
3786                rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3787                if (rc)
3788                        return rc;
3789
3790                if (p_params->b_granted)
3791                        break;
3792        } while (retry_cnt++ < p_params->retry_num);
3793
3794        return 0;
3795}
3796
3797int
3798qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
3799                    struct qed_ptt *p_ptt,
3800                    struct qed_resc_unlock_params *p_params)
3801{
3802        u32 param = 0, mcp_resp, mcp_param;
3803        u8 opcode;
3804        int rc;
3805
3806        opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3807                                   : RESOURCE_OPCODE_RELEASE;
3808        QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3809        QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3810
3811        DP_VERBOSE(p_hwfn, QED_MSG_SP,
3812                   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3813                   param, opcode, p_params->resource);
3814
3815        /* Attempt to release the resource */
3816        rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3817        if (rc)
3818                return rc;
3819
3820        /* Analyze the response */
3821        opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3822
3823        DP_VERBOSE(p_hwfn, QED_MSG_SP,
3824                   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3825                   mcp_param, opcode);
3826
3827        switch (opcode) {
3828        case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3829                DP_INFO(p_hwfn,
3830                        "Resource unlock request for an already released resource [%d]\n",
3831                        p_params->resource);
3832                fallthrough;
3833        case RESOURCE_OPCODE_RELEASED:
3834                p_params->b_released = true;
3835                break;
3836        case RESOURCE_OPCODE_WRONG_OWNER:
3837                p_params->b_released = false;
3838                break;
3839        default:
3840                DP_NOTICE(p_hwfn,
3841                          "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3842                          mcp_param, opcode);
3843                return -EINVAL;
3844        }
3845
3846        return 0;
3847}
3848
3849void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
3850                                    struct qed_resc_unlock_params *p_unlock,
3851                                    enum qed_resc_lock
3852                                    resource, bool b_is_permanent)
3853{
3854        if (p_lock) {
3855                memset(p_lock, 0, sizeof(*p_lock));
3856
3857                /* Permanent resources don't require aging, and there's no
3858                 * point in trying to acquire them more than once, since no
3859                 * other entity is expected to release them.
3860                 */
3861                if (b_is_permanent) {
3862                        p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
3863                } else {
3864                        p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3865                        p_lock->retry_interval =
3866                            QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3867                        p_lock->sleep_b4_retry = true;
3868                }
3869
3870                p_lock->resource = resource;
3871        }
3872
3873        if (p_unlock) {
3874                memset(p_unlock, 0, sizeof(*p_unlock));
3875                p_unlock->resource = resource;
3876        }
3877}
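
/* Minimal sketch of the intended lock/unlock pattern around an
 * MFW-arbitrated resource; the resource id and the guarded work are
 * placeholders.
 */
static int example_with_resc_lock(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  enum qed_resc_lock resource)
{
        struct qed_resc_unlock_params unlock_params;
        struct qed_resc_lock_params lock_params;
        int rc;

        qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
                                       resource, false);

        rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
        if (rc)
                return rc;
        if (!lock_params.b_granted)
                return -EBUSY;

        /* ... critical section guarded by the MFW lock ... */

        return qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}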
3878
3879bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
3880{
3881        return !!(p_hwfn->mcp_info->capabilities &
3882                  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3883}
3884
3885int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3886{
3887        u32 mcp_resp;
3888        int rc;
3889
3890        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3891                         0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3892        if (!rc)
3893                DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
3894                           "MFW supported features: %08x\n",
3895                           p_hwfn->mcp_info->capabilities);
3896
3897        return rc;
3898}
3899
3900int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3901{
3902        u32 mcp_resp, mcp_param, features;
3903
3904        features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3905                   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
3906                   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
3907
3908        if (QED_IS_E5(p_hwfn->cdev))
3909                features |=
3910                    DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;
3911
3912        return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3913                           features, &mcp_resp, &mcp_param);
3914}
3915
3916int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3917{
3918        struct qed_mcp_mb_params mb_params = {0};
3919        struct qed_dev *cdev = p_hwfn->cdev;
3920        u8 fir_valid, l2_valid;
3921        int rc;
3922
3923        mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
3924        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3925        if (rc)
3926                return rc;
3927
3928        if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3929                DP_INFO(p_hwfn,
3930                        "The get_engine_config command is unsupported by the MFW\n");
3931                return -EOPNOTSUPP;
3932        }
3933
3934        fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3935                                      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
3936        if (fir_valid)
3937                cdev->fir_affin =
3938                    QED_MFW_GET_FIELD(mb_params.mcp_param,
3939                                      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
3940
3941        l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3942                                     FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
3943        if (l2_valid)
3944                cdev->l2_affin_hint =
3945                    QED_MFW_GET_FIELD(mb_params.mcp_param,
3946                                      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
3947
3948        DP_INFO(p_hwfn,
3949                "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
3950                fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
3951
3952        return 0;
3953}
3954
3955int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3956{
3957        struct qed_mcp_mb_params mb_params = {0};
3958        struct qed_dev *cdev = p_hwfn->cdev;
3959        int rc;
3960
3961        mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
3962        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3963        if (rc)
3964                return rc;
3965
3966        if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3967                DP_INFO(p_hwfn,
3968                        "The get_ppfid_bitmap command is unsupported by the MFW\n");
3969                return -EOPNOTSUPP;
3970        }
3971
3972        cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
3973                                               FW_MB_PARAM_PPFID_BITMAP);
3974
3975        DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
3976                   cdev->ppfid_bitmap);
3977
3978        return 0;
3979}
3980
3981int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3982                        u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3983                        u32 *p_len)
3984{
3985        u32 mb_param = 0, resp, param;
3986        int rc;
3987
3988        QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3989        if (flags & QED_NVM_CFG_OPTION_INIT)
3990                QED_MFW_SET_FIELD(mb_param,
3991                                  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3992        if (flags & QED_NVM_CFG_OPTION_FREE)
3993                QED_MFW_SET_FIELD(mb_param,
3994                                  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3995        if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3996                QED_MFW_SET_FIELD(mb_param,
3997                                  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3998                QED_MFW_SET_FIELD(mb_param,
3999                                  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
4000                                  entity_id);
4001        }
4002
4003        rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4004                                DRV_MSG_CODE_GET_NVM_CFG_OPTION,
4005                                mb_param, &resp, &param, p_len, (u32 *)p_buf);
4006
4007        return rc;
4008}
4009
4010int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4011                        u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
4012                        u32 len)
4013{
4014        u32 mb_param = 0, resp, param;
4015
4016        QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
4017        if (flags & QED_NVM_CFG_OPTION_ALL)
4018                QED_MFW_SET_FIELD(mb_param,
4019                                  DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
4020        if (flags & QED_NVM_CFG_OPTION_INIT)
4021                QED_MFW_SET_FIELD(mb_param,
4022                                  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
4023        if (flags & QED_NVM_CFG_OPTION_COMMIT)
4024                QED_MFW_SET_FIELD(mb_param,
4025                                  DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
4026        if (flags & QED_NVM_CFG_OPTION_FREE)
4027                QED_MFW_SET_FIELD(mb_param,
4028                                  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
4029        if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
4030                QED_MFW_SET_FIELD(mb_param,
4031                                  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
4032                QED_MFW_SET_FIELD(mb_param,
4033                                  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
4034                                  entity_id);
4035        }
4036
4037        return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
4038                                  DRV_MSG_CODE_SET_NVM_CFG_OPTION,
4039                                  mb_param, &resp, &param, len, (u32 *)p_buf);
4040}
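
/* Illustrative only: reading a single NVM config option into a small
 * buffer.  The option id (1 here) is a placeholder; real ids come from
 * the nvm_cfg layout and are not defined in this file.
 */
static int example_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u32 len = sizeof(u32);
        u8 buf[sizeof(u32)] = {};

        return qed_mcp_nvm_get_cfg(p_hwfn, p_ptt, 1 /* option_id */,
                                   0 /* entity_id */,
                                   QED_NVM_CFG_OPTION_INIT |
                                   QED_NVM_CFG_OPTION_FREE,
                                   buf, &len);
}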
4041
4042#define QED_MCP_DBG_DATA_MAX_SIZE               MCP_DRV_NVM_BUF_LEN
4043#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE        sizeof(u32)
4044#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
4045        (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
4046
4047static int
4048__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
4049                          struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
4050{
4051        struct qed_mcp_mb_params mb_params;
4052        int rc;
4053
4054        if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
4055                DP_ERR(p_hwfn,
4056                       "Debug data size is %d while it should not exceed %d\n",
4057                       size, QED_MCP_DBG_DATA_MAX_SIZE);
4058                return -EINVAL;
4059        }
4060
4061        memset(&mb_params, 0, sizeof(mb_params));
4062        mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
4063        SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
4064        mb_params.p_data_src = p_buf;
4065        mb_params.data_src_size = size;
4066        rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4067        if (rc)
4068                return rc;
4069
4070        if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4071                DP_INFO(p_hwfn,
4072                        "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
4073                return -EOPNOTSUPP;
4074        } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
4075                DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
4076                return -EBUSY;
4077        } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
4078                DP_NOTICE(p_hwfn,
4079                          "Failed to send debug data to the MFW [resp 0x%08x]\n",
4080                          mb_params.mcp_resp);
4081                return -EINVAL;
4082        }
4083
4084        return 0;
4085}
4086
4087enum qed_mcp_dbg_data_type {
4088        QED_MCP_DBG_DATA_TYPE_RAW,
4089};
4090
4091/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
4092#define QED_MCP_DBG_DATA_HDR_SN_OFFSET  0
4093#define QED_MCP_DBG_DATA_HDR_SN_MASK            0x00000fff
4094#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET        12
4095#define QED_MCP_DBG_DATA_HDR_TYPE_MASK  0x000ff000
4096#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET       20
4097#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
4098#define QED_MCP_DBG_DATA_HDR_PF_OFFSET  28
4099#define QED_MCP_DBG_DATA_HDR_PF_MASK            0xf0000000
4100
4101#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST        0x1
4102#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
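
/* Worked example of the header layout above: a single-chunk send
 * (flags = FIRST | LAST = 0x3) of type RAW (0) from PF 2 with
 * sequence number 5 encodes as
 * (2 << 28) | (0x3 << 20) | (0 << 12) | 5 = 0x20300005.
 */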
4103
4104static int
4105qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
4106                        struct qed_ptt *p_ptt,
4107                        enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
4108{
4109        u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
4110        u32 tmp_size = size, *p_header, *p_payload;
4111        u8 flags = 0;
4112        u16 seq;
4113        int rc;
4114
4115        p_header = (u32 *)raw_data;
4116        p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
4117
4118        seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
4119
4120        /* First chunk is marked as 'first' */
4121        flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4122
4123        *p_header = 0;
4124        SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
4125        SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
4126        SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4127        SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
4128
4129        while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
4130                memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
4131                rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4132                                               QED_MCP_DBG_DATA_MAX_SIZE);
4133                if (rc)
4134                        return rc;
4135
4136                /* Clear the 'first' marking after sending the first chunk */
4137                if (p_tmp_buf == p_buf) {
4138                        flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4139                        SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
4140                                      flags);
4141                }
4142
4143                p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4144                tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4145        }
4146
4147        /* Last chunk is marked as 'last' */
4148        flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
4149        SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4150        memcpy(p_payload, p_tmp_buf, tmp_size);
4151
4152        /* Casting the remaining size to u8 is OK since at this point it is <= 32 */
4153        return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4154                                         (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
4155                                         tmp_size));
4156}
4157
4158int
4159qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
4160                            struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
4161{
4162        return qed_mcp_send_debug_data(p_hwfn, p_ptt,
4163                                       QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
4164}
4165