linux/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
                            struct qed_spq_entry *p_ent)
{
        /* qed_spq_get_entry() can either get an entry from the free_pool,
         * or, if no entries are left, allocate a new entry and add it to
         * the unlimited_pending list.
         */
        if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
                kfree(p_ent);
        else
                qed_spq_return_entry(p_hwfn, p_ent);
}
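
/* Illustrative sketch, not taken from the driver: once qed_sp_init_request()
 * has handed out an entry, any failure before qed_spq_post() must release it
 * through qed_sp_destroy_request(), so an entry that was placed on the
 * unlimited_pending list is freed rather than returned to the pool.
 * fill_ramrod_data() is a hypothetical helper:
 *
 *      rc = qed_sp_init_request(p_hwfn, &p_ent, cmd, protocol, &init_data);
 *      if (rc)
 *              return rc;
 *
 *      rc = fill_ramrod_data(p_ent);
 *      if (rc) {
 *              qed_sp_destroy_request(p_hwfn, p_ent);
 *              return rc;
 *      }
 *
 *      return qed_spq_post(p_hwfn, p_ent, NULL);
 */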

int qed_sp_init_request(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry **pp_ent,
                        u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
{
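        /* The opaque CID packs the opaque FID into bits 31:16, above the
         * 16-bit CID, before it is written into the ramrod header.
         */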
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct qed_spq_entry *p_ent = NULL;
        int rc;

        if (!pp_ent)
                return -ENOMEM;

        rc = qed_spq_get_entry(p_hwfn, pp_ent);

        if (rc)
                return rc;

        p_ent = *pp_ent;

        p_ent->elem.hdr.cid             = cpu_to_le32(opaque_cid);
        p_ent->elem.hdr.cmd_id          = cmd;
        p_ent->elem.hdr.protocol_id     = protocol;

        p_ent->priority         = QED_SPQ_PRIORITY_NORMAL;
        p_ent->comp_mode        = p_data->comp_mode;
        p_ent->comp_done.done   = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
                p_ent->comp_cb.cookie = &p_ent->comp_done;
                break;

        case QED_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
                        goto err;

                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;

        case QED_SPQ_MODE_CB:
                if (!p_data->p_comp_data)
                        p_ent->comp_cb.function = NULL;
                else
                        p_ent->comp_cb = *p_data->p_comp_data;
                break;

        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                goto err;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
                   opaque_cid, cmd, protocol,
                   (unsigned long)&p_ent->ramrod,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

        return 0;

err:
        qed_sp_destroy_request(p_hwfn, p_ent);

        return -EINVAL;
}
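
/* A minimal caller sketch, mirroring the simple ramrods later in this file.
 * QED_SPQ_MODE_EBLOCK needs no completion data (the entry's own comp_done
 * serves as the cookie), while QED_SPQ_MODE_CB takes an optional callback.
 * my_completion_cb and my_ctx are hypothetical names:
 *
 *      struct qed_spq_comp_cb cb = {
 *              .function = my_completion_cb,
 *              .cookie = my_ctx,
 *      };
 *      struct qed_sp_init_data init_data;
 *
 *      memset(&init_data, 0, sizeof(init_data));
 *      init_data.cid = qed_spq_get_cid(p_hwfn);
 *      init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      init_data.comp_mode = QED_SPQ_MODE_CB;
 *      init_data.p_comp_data = &cb;
 */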

static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
        switch (type) {
        case QED_TUNN_CLSS_MAC_VLAN:
                return TUNNEL_CLSS_MAC_VLAN;
        case QED_TUNN_CLSS_MAC_VNI:
                return TUNNEL_CLSS_MAC_VNI;
        case QED_TUNN_CLSS_INNER_MAC_VLAN:
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case QED_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
        case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
                return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
        default:
                return TUNNEL_CLSS_MAC_VLAN;
        }
}

static void
qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
                            struct qed_tunnel_info *p_src, bool b_pf_start)
{
        if (p_src->vxlan.b_update_mode || b_pf_start)
                p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

        if (p_src->l2_gre.b_update_mode || b_pf_start)
                p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

        if (p_src->ip_gre.b_update_mode || b_pf_start)
                p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

        if (p_src->l2_geneve.b_update_mode || b_pf_start)
                p_tun->l2_geneve.b_mode_enabled =
                    p_src->l2_geneve.b_mode_enabled;

        if (p_src->ip_geneve.b_update_mode || b_pf_start)
                p_tun->ip_geneve.b_mode_enabled =
                    p_src->ip_geneve.b_mode_enabled;
}
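
/* On PF start (b_pf_start == true) every tunnel mode is taken from the
 * caller unconditionally; on PF update only the tunnels whose b_update_mode
 * flag is set are touched, so unrelated modes keep their cached state.
 */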

static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
                                  struct qed_tunnel_info *p_src)
{
        int type;

        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

        type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
        p_tun->vxlan.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
        p_tun->l2_gre.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
        p_tun->ip_gre.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
        p_tun->l2_geneve.tun_cls = type;
        type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
        p_tun->ip_geneve.tun_cls = type;
}

static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
                               struct qed_tunnel_info *p_src)
{
        p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
        p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

        if (p_src->geneve_port.b_update_port)
                p_tun->geneve_port.port = p_src->geneve_port.port;

        if (p_src->vxlan_port.b_update_port)
                p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

static void
__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                              struct qed_tunn_update_type *tun_type)
{
        *p_tunn_cls = tun_type->tun_cls;
}

static void
qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                            struct qed_tunn_update_type *tun_type,
                            u8 *p_update_port,
                            __le16 *p_port,
                            struct qed_tunn_update_udp_port *p_udp_port)
{
        __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
        if (p_udp_port->b_update_port) {
                *p_update_port = 1;
                *p_port = cpu_to_le16(p_udp_port->port);
        }
}
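
/* The double-underscore variant writes only the classification byte and is
 * used for tunnel types that carry no dedicated UDP port (L2/IP GRE and
 * IP GENEVE); the wrapper additionally latches the UDP destination port
 * into the ramrod when the caller flagged it for update.
 */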

static void
qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
                              struct qed_tunnel_info *p_src,
                              struct pf_update_tunnel_config *p_tunn_cfg)
{
        struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

        qed_set_pf_update_tunn_mode(p_tun, p_src, false);
        qed_set_tunn_cls_info(p_tun, p_src);
        qed_set_tunn_ports(p_tun, p_src);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                    &p_tun->vxlan,
                                    &p_tunn_cfg->set_vxlan_udp_port_flg,
                                    &p_tunn_cfg->vxlan_udp_port,
                                    &p_tun->vxlan_port);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                    &p_tun->l2_geneve,
                                    &p_tunn_cfg->set_geneve_udp_port_flg,
                                    &p_tunn_cfg->geneve_udp_port,
                                    &p_tun->geneve_port);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                      &p_tun->ip_geneve);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                      &p_tun->l2_gre);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                      &p_tun->ip_gre);

        p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}

static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt,
                                 struct qed_tunnel_info *p_tun)
{
        qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
                           p_tun->ip_gre.b_mode_enabled);
        qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

        qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
                              p_tun->ip_geneve.b_mode_enabled);
}

static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_tunnel_info *p_tunn)
{
        if (p_tunn->vxlan_port.b_update_port)
                qed_set_vxlan_dest_port(p_hwfn, p_ptt,
                                        p_tunn->vxlan_port.port);

        if (p_tunn->geneve_port.b_update_port)
                qed_set_geneve_dest_port(p_hwfn, p_ptt,
                                         p_tunn->geneve_port.port);

        qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
                             struct qed_tunnel_info *p_src,
                             struct pf_start_tunnel_config *p_tunn_cfg)
{
        struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

        if (!p_src)
                return;

        qed_set_pf_update_tunn_mode(p_tun, p_src, true);
        qed_set_tunn_cls_info(p_tun, p_src);
        qed_set_tunn_ports(p_tun, p_src);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                    &p_tun->vxlan,
                                    &p_tunn_cfg->set_vxlan_udp_port_flg,
                                    &p_tunn_cfg->vxlan_udp_port,
                                    &p_tun->vxlan_port);

        qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                    &p_tun->l2_geneve,
                                    &p_tunn_cfg->set_geneve_udp_port_flg,
                                    &p_tunn_cfg->geneve_udp_port,
                                    &p_tun->geneve_port);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                      &p_tun->ip_geneve);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                      &p_tun->l2_gre);

        __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                      &p_tun->ip_gre);
}
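
/* Unlike the PF-update variant above, this tolerates a NULL p_src (PF start
 * without tunnel configuration), adopts every mode unconditionally via
 * b_pf_start, and has no update_rx_pf_clss flag to mirror into the ramrod.
 */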

int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
                    struct qed_ptt *p_ptt,
                    struct qed_tunnel_info *p_tunn,
                    bool allow_npar_tx_switch)
{
        struct pf_start_ramrod_data *p_ramrod = NULL;
        u16 sb = qed_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;
        u8 page_cnt, i;

        /* update initial eq producer */
        qed_eq_prod_update(p_hwfn,
                           qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_START,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.pf_start;

        p_ramrod->event_ring_sb_id      = cpu_to_le16(sb);
        p_ramrod->event_ring_sb_index   = sb_index;
        p_ramrod->path_id               = QED_PATH_ID(p_hwfn);
        p_ramrod->dont_log_ramrods      = 0;
        p_ramrod->log_type_mask         = cpu_to_le16(0xf);

        if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
                p_ramrod->mf_mode = MF_OVLAN;
        else
                p_ramrod->mf_mode = MF_NPAR;

        p_ramrod->outer_tag_config.outer_tag.tci =
                                cpu_to_le16(p_hwfn->hw_info.ovlan);
        if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
                p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
        } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
                p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
                p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
        }

        p_ramrod->outer_tag_config.pri_map_valid = 1;
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
                p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;

        /* enable_stag_pri_change should be set if the port is in BD mode or
         * in UFP with Host Control mode.
         */
        if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
                if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
                        p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
                else
                        p_ramrod->outer_tag_config.enable_stag_pri_change = 0;

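                /* 802.1Q TCI layout: PCP in bits 15:13, DEI in bit 12,
                 * VID in bits 11:0; fold the UFP traffic class into PCP.
                 */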
                p_ramrod->outer_tag_config.outer_tag.tci |=
                    cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
        }

        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
        page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

        qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);

        if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case QED_PCI_FCOE:
                p_ramrod->personality = PERSONALITY_FCOE;
                break;
        case QED_PCI_ISCSI:
                p_ramrod->personality = PERSONALITY_ISCSI;
                break;
        case QED_PCI_ETH_ROCE:
        case QED_PCI_ETH_IWARP:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown personality %d\n",
                          p_hwfn->hw_info.personality);
                p_ramrod->personality = PERSONALITY_ETH;
        }

        if (p_hwfn->cdev->p_iov_info) {
                struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;

                p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
                p_ramrod->num_vfs = (u8) p_iov->total_vfs;
        }
        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
                   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);

        if (p_tunn)
                qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
                                          &p_hwfn->cdev->tunnel);

        return rc;
}
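
/* A hedged sketch of a call site, not lifted from the driver: PF start is
 * issued once per hwfn from the hardware-init path while a PTT is held,
 * with p_tunn optionally carrying the initial tunnel configuration:
 *
 *      rc = qed_sp_pf_start(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel, true);
 *      if (rc)
 *              DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
 */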

int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
                                      &p_ent->ramrod.pf_update);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EOPNOTSUPP;

        if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
                DP_INFO(p_hwfn, "Invalid priority type %d\n",
                        p_hwfn->ufp_info.pri_type);
                return -EINVAL;
        }

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
        if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
                p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
        else
                p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              struct qed_tunnel_info *p_tunn,
                              enum spq_mode comp_mode,
                              struct qed_spq_comp_cb *p_comp_data)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        if (IS_VF(p_hwfn->cdev))
                return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

        if (!p_tunn)
                return -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
                                      &p_ent->ramrod.pf_update.tunnel_config);

        rc = qed_spq_post(p_hwfn, p_ent, NULL);
        if (rc)
                return rc;

        qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);

        return rc;
}
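
/* A minimal sketch using only the fields consumed above (the port value is
 * illustrative): switching the VXLAN UDP destination port on a running PF,
 * blocking until the firmware completes the ramrod:
 *
 *      struct qed_tunnel_info tunn;
 *
 *      memset(&tunn, 0, sizeof(tunn));
 *      tunn.vxlan_port.b_update_port = true;
 *      tunn.vxlan_port.port = 4790;
 *
 *      rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
 *                                     QED_SPQ_MODE_EBLOCK, NULL);
 */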

int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        return qed_spq_post(p_hwfn, p_ent, NULL);
}
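
/* COMMON_RAMROD_EMPTY carries no payload; posting it in EBLOCK mode simply
 * round-trips the slow path (SPQ doorbell, firmware, event queue), which is
 * presumably why it serves as a liveness check -- hence "heartbeat".
 */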

int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_CB;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
        p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
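        /* As in qed_sp_pf_start(), UFP folds the traffic class into the
         * PCP bits (15:13) of the S-tag's TCI.
         */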
        if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
                p_ent->ramrod.pf_update.mf_vlan |=
                        cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));

        return qed_spq_post(p_hwfn, p_ent, NULL);
}