linux/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
                            struct qed_spq_entry *p_ent)
{
        /* qed_spq_get_entry() can either get an entry from the free_pool,
         * or, if no entries are left, allocate a new entry and add it to
         * the unlimited_pending list.
         */
        if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
                kfree(p_ent);
        else
                qed_spq_return_entry(p_hwfn, p_ent);
}

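/* Initialize a slowpath-queue entry for the given ramrod: acquire an
 * entry via qed_spq_get_entry(), fill the common element header, and
 * latch the requested completion mode.  The opaque CID placed in the
 * header carries the opaque FID in its upper 16 bits and the
 * connection ID in the lower 16 bits.
 */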
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry **pp_ent,
                        u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct qed_spq_entry *p_ent = NULL;
        int rc;

        if (!pp_ent)
                return -ENOMEM;

        rc = qed_spq_get_entry(p_hwfn, pp_ent);

        if (rc)
                return rc;

        p_ent = *pp_ent;

        p_ent->elem.hdr.cid             = cpu_to_le32(opaque_cid);
        p_ent->elem.hdr.cmd_id          = cmd;
        p_ent->elem.hdr.protocol_id     = protocol;

        p_ent->priority         = QED_SPQ_PRIORITY_NORMAL;
        p_ent->comp_mode        = p_data->comp_mode;
        p_ent->comp_done.done   = 0;

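        /* Completion semantics (see enum spq_mode): EBLOCK - qed itself
         * blocks on comp_done until the ramrod completes; BLOCK - the
         * caller polls a designated address via its own cookie; CB - a
         * caller-supplied callback is invoked on completion (may be NULL).
         */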
        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
                p_ent->comp_cb.cookie = &p_ent->comp_done;
                break;

        case QED_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
                        goto err;

                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;

        case QED_SPQ_MODE_CB:
                if (!p_data->p_comp_data)
                        p_ent->comp_cb.function = NULL;
                else
                        p_ent->comp_cb = *p_data->p_comp_data;
                break;

        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                goto err;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
                   opaque_cid, cmd, protocol,
                   (unsigned long)&p_ent->ramrod,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

        return 0;

err:
        qed_sp_destroy_request(p_hwfn, p_ent);

        return -EINVAL;
}
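
/* A minimal usage sketch, mirroring qed_sp_pf_update() below; 'cmd' and
 * 'protocol' stand in for the caller's ramrod and protocol IDs:
 *
 *	struct qed_spq_entry *p_ent = NULL;
 *	struct qed_sp_init_data init_data;
 *
 *	memset(&init_data, 0, sizeof(init_data));
 *	init_data.cid = qed_spq_get_cid(p_hwfn);
 *	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *	init_data.comp_mode = QED_SPQ_MODE_CB;
 *	rc = qed_sp_init_request(p_hwfn, &p_ent, cmd, protocol, &init_data);
 *	if (!rc)
 *		rc = qed_spq_post(p_hwfn, p_ent, NULL);
 */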

static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
        switch (type) {
        case QED_TUNN_CLSS_MAC_VLAN:
                return TUNNEL_CLSS_MAC_VLAN;
        case QED_TUNN_CLSS_MAC_VNI:
                return TUNNEL_CLSS_MAC_VNI;
        case QED_TUNN_CLSS_INNER_MAC_VLAN:
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case QED_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
        case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
                return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
        default:
                return TUNNEL_CLSS_MAC_VLAN;
        }
}

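/* On PF start (b_pf_start) every tunnel mode is taken from @p_src;
 * on a PF update only the modes flagged b_update_mode are copied.
 */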
static void
qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
                            struct qed_tunnel_info *p_src, bool b_pf_start)
{
        if (p_src->vxlan.b_update_mode || b_pf_start)
                p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

        if (p_src->l2_gre.b_update_mode || b_pf_start)
                p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

        if (p_src->ip_gre.b_update_mode || b_pf_start)
                p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

        if (p_src->l2_geneve.b_update_mode || b_pf_start)
                p_tun->l2_geneve.b_mode_enabled =
                    p_src->l2_geneve.b_mode_enabled;

        if (p_src->ip_geneve.b_update_mode || b_pf_start)
                p_tun->ip_geneve.b_mode_enabled =
                    p_src->ip_geneve.b_mode_enabled;
}

static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
                                  struct qed_tunnel_info *p_src)
{
        int type;

        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

        type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
        p_tun->vxlan.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
        p_tun->l2_gre.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
        p_tun->ip_gre.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
        p_tun->l2_geneve.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
        p_tun->ip_geneve.tun_cls = type;
}

static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
                               struct qed_tunnel_info *p_src)
{
        p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
        p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

        if (p_src->geneve_port.b_update_port)
                p_tun->geneve_port.port = p_src->geneve_port.port;

        if (p_src->vxlan_port.b_update_port)
                p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

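/* The __ variant copies only the tunnel classification into the ramrod;
 * the full variant additionally latches a pending UDP destination-port
 * update (flag plus port number).
 */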
static void
__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                              struct qed_tunn_update_type *tun_type)
{
        *p_tunn_cls = tun_type->tun_cls;
}

static void
qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                            struct qed_tunn_update_type *tun_type,
                            u8 *p_update_port,
                            __le16 *p_port,
                            struct qed_tunn_update_udp_port *p_udp_port)
{
        __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
        if (p_udp_port->b_update_port) {
                *p_update_port = 1;
                *p_port = cpu_to_le16(p_udp_port->port);
        }
}

static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
                              struct qed_tunnel_info *p_src,
                              struct pf_update_tunnel_config *p_tunn_cfg)
{
        struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

        qed_set_pf_update_tunn_mode(p_tun, p_src, false);
        qed_set_tunn_cls_info(p_tun, p_src);
        qed_set_tunn_ports(p_tun, p_src);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                    &p_tun->vxlan,
                                    &p_tunn_cfg->set_vxlan_udp_port_flg,
                                    &p_tunn_cfg->vxlan_udp_port,
                                    &p_tun->vxlan_port);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                    &p_tun->l2_geneve,
                                    &p_tunn_cfg->set_geneve_udp_port_flg,
                                    &p_tunn_cfg->geneve_udp_port,
                                    &p_tun->geneve_port);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                      &p_tun->ip_geneve);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                      &p_tun->l2_gre);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                      &p_tun->ip_gre);

        p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}

static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_tunnel_info *p_tun)
{
        qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
                           p_tun->ip_gre.b_mode_enabled);
        qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

        qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
                              p_tun->ip_geneve.b_mode_enabled);
}

static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_tunnel_info *p_tunn)
{
        if (p_tunn->vxlan_port.b_update_port)
                qed_set_vxlan_dest_port(p_hwfn, p_ptt,
                                        p_tunn->vxlan_port.port);

        if (p_tunn->geneve_port.b_update_port)
                qed_set_geneve_dest_port(p_hwfn, p_ptt,
                                         p_tunn->geneve_port.port);

        qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
                             struct qed_tunnel_info *p_src,
                             struct pf_start_tunnel_config *p_tunn_cfg)
{
        struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

        if (!p_src)
                return;

        qed_set_pf_update_tunn_mode(p_tun, p_src, true);
        qed_set_tunn_cls_info(p_tun, p_src);
        qed_set_tunn_ports(p_tun, p_src);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                    &p_tun->vxlan,
                                    &p_tunn_cfg->set_vxlan_udp_port_flg,
                                    &p_tunn_cfg->vxlan_udp_port,
                                    &p_tun->vxlan_port);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                    &p_tun->l2_geneve,
                                    &p_tunn_cfg->set_geneve_udp_port_flg,
                                    &p_tunn_cfg->geneve_udp_port,
                                    &p_tun->geneve_port);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                      &p_tun->ip_geneve);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                      &p_tun->l2_gre);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                      &p_tun->ip_gre);
}

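/* Send the PF_START ramrod: program the event-ring SB and PBL, the
 * multi-function mode, the outer-tag (S-tag) configuration, tunnel
 * configuration, PF personality and the SR-IOV VF range, then post
 * the request with EBLOCK completion.
 */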
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    struct qed_tunnel_info *p_tunn,
                    bool allow_npar_tx_switch)
{
        struct outer_tag_config_struct *outer_tag_config;
        struct pf_start_ramrod_data *p_ramrod = NULL;
        u16 sb = qed_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        u8 page_cnt, i;
        int rc;

        /* update initial eq producer */
        qed_eq_prod_update(p_hwfn,
                           qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_START,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.pf_start;

        p_ramrod->event_ring_sb_id      = cpu_to_le16(sb);
        p_ramrod->event_ring_sb_index   = sb_index;
        p_ramrod->path_id               = QED_PATH_ID(p_hwfn);
        p_ramrod->dont_log_ramrods      = 0;
        p_ramrod->log_type_mask         = cpu_to_le16(0xf);

        if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
                p_ramrod->mf_mode = MF_OVLAN;
        else
                p_ramrod->mf_mode = MF_NPAR;

        outer_tag_config = &p_ramrod->outer_tag_config;
        outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan);

        if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
                outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q);
        } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
                outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD);
                outer_tag_config->enable_stag_pri_change = 1;
        }

        outer_tag_config->pri_map_valid = 1;
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
                outer_tag_config->inner_to_outer_pri_map[i] = i;

        /* enable_stag_pri_change should be set if the port is in BD mode,
         * or in UFP with Host Control mode.
         */
        if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
                if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
                        outer_tag_config->enable_stag_pri_change = 1;
                else
                        outer_tag_config->enable_stag_pri_change = 0;

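                /* Fold the UFP TC into the PCP field (bits 15:13) of the
                 * outer-tag TCI.
                 */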
                outer_tag_config->outer_tag.tci |=
                    cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
        }

        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                       qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
        page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                       qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));

        qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

        if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case QED_PCI_FCOE:
                p_ramrod->personality = PERSONALITY_FCOE;
                break;
        case QED_PCI_ISCSI:
        case QED_PCI_NVMETCP:
                p_ramrod->personality = PERSONALITY_TCP_ULP;
                break;
        case QED_PCI_ETH_ROCE:
        case QED_PCI_ETH_IWARP:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown personality %d\n",
                          p_hwfn->hw_info.personality);
                p_ramrod->personality = PERSONALITY_ETH;
        }

        if (p_hwfn->cdev->p_iov_info) {
                struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

                p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
                p_ramrod->num_vfs = (u8) p_iov->total_vfs;
        }
        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
                   sb, sb_index, outer_tag_config->outer_tag.tci);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        if (p_tunn)
                qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
                                          &p_hwfn->cdev->tunnel);

        return rc;
}

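/* Send a PF_UPDATE ramrod carrying the current DCBx results; completion
 * uses callback mode with no explicit callback supplied.
 */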
int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
                                      &p_ent->ramrod.pf_update);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

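/* Propagate the negotiated UFP priority type to FW: S-tag priority
 * change is enabled only when the host (OS) owns the priority
 * (QED_UFP_PRI_OS).
 */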
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
                DP_INFO(p_hwfn, "Invalid priority type %d\n",
                        p_hwfn->ufp_info.pri_type);
                return -EINVAL;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
        if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
                p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
        else
                p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

/* Set the PF_UPDATE ramrod's tunnel-config params, post it, and apply
 * the resulting tunnel configuration to HW.
 */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              struct qed_tunnel_info *p_tunn,
                              enum spq_mode comp_mode,
                              struct qed_spq_comp_cb *p_comp_data)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        if (IS_VF(p_hwfn->cdev))
                return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

        if (!p_tunn)
                return -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
                                      &p_ent->ramrod.pf_update.tunnel_config);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                return rc;

        qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);

        return rc;
}

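/* Send the PF_STOP ramrod; EBLOCK mode, so this returns only once FW
 * has acknowledged the stop.
 */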
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

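/* Post an EMPTY ramrod, presumably as a slowpath liveness check: it has
 * no side effects, but its completion shows the FW SPQ channel is
 * responsive.
 */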
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

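/* Update the PF's outer VLAN (S-tag) in FW; under UFP the TC is folded
 * into the PCP bits of mf_vlan, mirroring qed_sp_pf_start().
 */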
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
        p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
        if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
                p_ent->ramrod.pf_update.mf_vlan |=
                        cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));

        return qed_spq_post(p_hwfn, p_ent, NULL);
}