/* linux/drivers/net/ethernet/qlogic/qed/qed_sp.h */
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

   7#ifndef _QED_SP_H
   8#define _QED_SP_H
   9
  10#include <linux/types.h>
  11#include <linux/kernel.h>
  12#include <linux/list.h>
  13#include <linux/slab.h>
  14#include <linux/spinlock.h>
  15#include <linux/qed/qed_chain.h>
  16#include "qed.h"
  17#include "qed_hsi.h"
  18
  19enum spq_mode {
  20        QED_SPQ_MODE_BLOCK,     /* Client will poll a designated mem. address */
  21        QED_SPQ_MODE_CB,        /* Client supplies a callback */
  22        QED_SPQ_MODE_EBLOCK,    /* QED should block until completion */
  23};
  24
  25struct qed_spq_comp_cb {
  26        void    (*function)(struct qed_hwfn *,
  27                            void *,
  28                            union event_ring_data *,
  29                            u8 fw_return_code);
  30        void    *cookie;
  31};
  32
  33/**
  34 * @brief qed_eth_cqe_completion - handles the completion of a
  35 *        ramrod on the cqe ring
  36 *
  37 * @param p_hwfn
  38 * @param cqe
  39 *
  40 * @return int
  41 */
  42int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
  43                           struct eth_slow_path_rx_cqe *cqe);
  44
/**
 *  @file
 *
 *  QED Slow-hwfn queue interface
 */

  51union ramrod_data {
  52        struct pf_start_ramrod_data pf_start;
  53        struct pf_update_ramrod_data pf_update;
  54        struct rx_queue_start_ramrod_data rx_queue_start;
  55        struct rx_queue_update_ramrod_data rx_queue_update;
  56        struct rx_queue_stop_ramrod_data rx_queue_stop;
  57        struct tx_queue_start_ramrod_data tx_queue_start;
  58        struct tx_queue_stop_ramrod_data tx_queue_stop;
  59        struct vport_start_ramrod_data vport_start;
  60        struct vport_stop_ramrod_data vport_stop;
  61        struct rx_update_gft_filter_data rx_update_gft;
  62        struct vport_update_ramrod_data vport_update;
  63        struct core_rx_start_ramrod_data core_rx_queue_start;
  64        struct core_rx_stop_ramrod_data core_rx_queue_stop;
  65        struct core_tx_start_ramrod_data core_tx_queue_start;
  66        struct core_tx_stop_ramrod_data core_tx_queue_stop;
  67        struct vport_filter_update_ramrod_data vport_filter_update;
  68
  69        struct rdma_init_func_ramrod_data rdma_init_func;
  70        struct rdma_close_func_ramrod_data rdma_close_func;
  71        struct rdma_register_tid_ramrod_data rdma_register_tid;
  72        struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
  73        struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
  74        struct roce_create_qp_req_ramrod_data roce_create_qp_req;
  75        struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
  76        struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
  77        struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
  78        struct roce_query_qp_req_ramrod_data roce_query_qp_req;
  79        struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
  80        struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
  81        struct roce_init_func_ramrod_data roce_init_func;
  82        struct rdma_create_cq_ramrod_data rdma_create_cq;
  83        struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
  84        struct rdma_srq_create_ramrod_data rdma_create_srq;
  85        struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
  86        struct rdma_srq_modify_ramrod_data rdma_modify_srq;
  87        struct iwarp_create_qp_ramrod_data iwarp_create_qp;
  88        struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
  89        struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
  90        struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
  91        struct iwarp_init_func_ramrod_data iwarp_init_func;
  92        struct fcoe_init_ramrod_params fcoe_init;
  93        struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
  94        struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
  95        struct fcoe_stat_ramrod_params fcoe_stat;
  96
  97        struct iscsi_init_ramrod_params iscsi_init;
  98        struct iscsi_spe_conn_offload iscsi_conn_offload;
  99        struct iscsi_conn_update_ramrod_params iscsi_conn_update;
 100        struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
 101        struct iscsi_spe_conn_termination iscsi_conn_terminate;
 102
 103        struct nvmetcp_init_ramrod_params nvmetcp_init;
 104        struct nvmetcp_spe_conn_offload nvmetcp_conn_offload;
 105        struct nvmetcp_conn_update_ramrod_params nvmetcp_conn_update;
 106        struct nvmetcp_spe_conn_termination nvmetcp_conn_terminate;
 107
 108        struct vf_start_ramrod_data vf_start;
 109        struct vf_stop_ramrod_data vf_stop;
 110};
 111
 112#define EQ_MAX_CREDIT   0xffffffff
 113
 114enum spq_priority {
 115        QED_SPQ_PRIORITY_NORMAL,
 116        QED_SPQ_PRIORITY_HIGH,
 117};
 118
 119union qed_spq_req_comp {
 120        struct qed_spq_comp_cb  cb;
 121        u64                     *done_addr;
 122};
 123
 124struct qed_spq_comp_done {
 125        unsigned int    done;
 126        u8              fw_return_code;
 127};
 128
 129struct qed_spq_entry {
 130        struct list_head                list;
 131
 132        u8                              flags;
 133
 134        /* HSI slow path element */
 135        struct slow_path_element        elem;
 136
 137        union ramrod_data               ramrod;
 138
 139        enum spq_priority               priority;
 140
 141        /* pending queue for this entry */
 142        struct list_head                *queue;
 143
 144        enum spq_mode                   comp_mode;
 145        struct qed_spq_comp_cb          comp_cb;
 146        struct qed_spq_comp_done        comp_done; /* SPQ_MODE_EBLOCK */
 147
 148        /* Posted entry for unlimited list entry in EBLOCK mode */
 149        struct qed_spq_entry            *post_ent;
 150};
 151
 152struct qed_eq {
 153        struct qed_chain        chain;
 154        u8                      eq_sb_index;    /* index within the SB */
 155        __le16                  *p_fw_cons;     /* ptr to index value */
 156};
 157
 158struct qed_consq {
 159        struct qed_chain chain;
 160};
 161
 162typedef int (*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, u8 opcode,
 163                                     __le16 echo, union event_ring_data *data,
 164                                     u8 fw_return_code);
 165
 166int
 167qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
 168                          enum protocol_type protocol_id,
 169                          qed_spq_async_comp_cb cb);
 170
 171void
 172qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
 173                            enum protocol_type protocol_id);
 174
 175struct qed_spq {
 176        spinlock_t              lock; /* SPQ lock */
 177
 178        struct list_head        unlimited_pending;
 179        struct list_head        pending;
 180        struct list_head        completion_pending;
 181        struct list_head        free_pool;
 182
 183        struct qed_chain        chain;
 184
 185        /* allocated dma-able memory for spq entries (+ramrod data) */
 186        dma_addr_t              p_phys;
 187        struct qed_spq_entry    *p_virt;
 188
 189#define SPQ_RING_SIZE \
 190        (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
 191
 192        /* Bitmap for handling out-of-order completions */
 193        DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
 194        u8                      comp_bitmap_idx;
 195
 196        /* Statistics */
 197        u32                     unlimited_pending_count;
 198        u32                     normal_count;
 199        u32                     high_count;
 200        u32                     comp_sent_count;
 201        u32                     comp_count;
 202
 203        u32                     cid;
 204        u32                     db_addr_offset;
 205        struct core_db_data     db_data;
 206        qed_spq_async_comp_cb   async_comp_cb[MAX_PROTOCOL_TYPE];
 207};
 208
 209/**
 210 * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that
 211 *        Pends it to the future list.
 212 *
 213 * @param p_hwfn
 214 * @param p_req
 215 *
 216 * @return int
 217 */
 218int qed_spq_post(struct qed_hwfn *p_hwfn,
 219                 struct qed_spq_entry *p_ent,
 220                 u8 *fw_return_code);
 221
 222/**
 223 * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ.
 224 *
 225 * @param p_hwfn
 226 *
 227 * @return int
 228 */
 229int qed_spq_alloc(struct qed_hwfn *p_hwfn);
 230
 231/**
 232 * @brief qed_spq_setup - Reset the SPQ to its start state.
 233 *
 234 * @param p_hwfn
 235 */
 236void qed_spq_setup(struct qed_hwfn *p_hwfn);
 237
 238/**
 239 * @brief qed_spq_deallocate - Deallocates the given SPQ struct.
 240 *
 241 * @param p_hwfn
 242 */
 243void qed_spq_free(struct qed_hwfn *p_hwfn);
 244
 245/**
 246 * @brief qed_spq_get_entry - Obtain an entrry from the spq
 247 *        free pool list.
 248 *
 249 *
 250 *
 251 * @param p_hwfn
 252 * @param pp_ent
 253 *
 254 * @return int
 255 */
 256int
 257qed_spq_get_entry(struct qed_hwfn *p_hwfn,
 258                  struct qed_spq_entry **pp_ent);
 259
 260/**
 261 * @brief qed_spq_return_entry - Return an entry to spq free
 262 *                                 pool list
 263 *
 264 * @param p_hwfn
 265 * @param p_ent
 266 */
 267void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
 268                          struct qed_spq_entry *p_ent);
 269/**
 270 * @brief qed_eq_allocate - Allocates & initializes an EQ struct
 271 *
 272 * @param p_hwfn
 273 * @param num_elem number of elements in the eq
 274 *
 275 * @return int
 276 */
 277int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);
 278
 279/**
 280 * @brief qed_eq_setup - Reset the EQ to its start state.
 281 *
 282 * @param p_hwfn
 283 */
 284void qed_eq_setup(struct qed_hwfn *p_hwfn);
 285
 286/**
 287 * @brief qed_eq_free - deallocates the given EQ struct.
 288 *
 289 * @param p_hwfn
 290 */
 291void qed_eq_free(struct qed_hwfn *p_hwfn);
 292
 293/**
 294 * @brief qed_eq_prod_update - update the FW with default EQ producer
 295 *
 296 * @param p_hwfn
 297 * @param prod
 298 */
 299void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
 300                        u16 prod);
 301
 302/**
 303 * @brief qed_eq_completion - Completes currently pending EQ elements
 304 *
 305 * @param p_hwfn
 306 * @param cookie
 307 *
 308 * @return int
 309 */
 310int qed_eq_completion(struct qed_hwfn *p_hwfn,
 311                      void *cookie);
 312
 313/**
 314 * @brief qed_spq_completion - Completes a single event
 315 *
 316 * @param p_hwfn
 317 * @param echo - echo value from cookie (used for determining completion)
 318 * @param p_data - data from cookie (used in callback function if applicable)
 319 *
 320 * @return int
 321 */
 322int qed_spq_completion(struct qed_hwfn *p_hwfn,
 323                       __le16 echo,
 324                       u8 fw_return_code,
 325                       union event_ring_data *p_data);
 326
 327/**
 328 * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
 329 *
 330 * @param p_hwfn
 331 *
 332 * @return u32 - SPQ CID
 333 */
 334u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
 335
 336/**
 337 * @brief qed_consq_alloc - Allocates & initializes an ConsQ
 338 *        struct
 339 *
 340 * @param p_hwfn
 341 *
 342 * @return int
 343 */
 344int qed_consq_alloc(struct qed_hwfn *p_hwfn);
 345
 346/**
 347 * @brief qed_consq_setup - Reset the ConsQ to its start state.
 348 *
 349 * @param p_hwfn
 350 */
 351void qed_consq_setup(struct qed_hwfn *p_hwfn);
 352
 353/**
 354 * @brief qed_consq_free - deallocates the given ConsQ struct.
 355 *
 356 * @param p_hwfn
 357 */
 358void qed_consq_free(struct qed_hwfn *p_hwfn);
 359int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
 360
 361/**
 362 * @file
 363 *
 364 * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
 365 */
 366
 367#define QED_SP_EQ_COMPLETION  0x01
 368#define QED_SP_CQE_COMPLETION 0x02
 369
 370struct qed_sp_init_data {
 371        u32                     cid;
 372        u16                     opaque_fid;
 373
 374        /* Information regarding operation upon sending & completion */
 375        enum spq_mode           comp_mode;
 376        struct qed_spq_comp_cb *p_comp_data;
 377};
 378
 379/**
 380 * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
 381 *        Should be called on in error flows after initializing the SPQ entry
 382 *        and before posting it.
 383 *
 384 * @param p_hwfn
 385 * @param p_ent
 386 */
 387void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
 388                            struct qed_spq_entry *p_ent);
 389
 390int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 391                        struct qed_spq_entry **pp_ent,
 392                        u8 cmd,
 393                        u8 protocol,
 394                        struct qed_sp_init_data *p_data);
 395
 396/**
 397 * @brief qed_sp_pf_start - PF Function Start Ramrod
 398 *
 399 * This ramrod is sent to initialize a physical function (PF). It will
 400 * configure the function related parameters and write its completion to the
 401 * event ring specified in the parameters.
 402 *
 403 * Ramrods complete on the common event ring for the PF. This ring is
 404 * allocated by the driver on host memory and its parameters are written
 405 * to the internal RAM of the UStorm by the Function Start Ramrod.
 406 *
 407 * @param p_hwfn
 408 * @param p_ptt
 409 * @param p_tunn
 410 * @param allow_npar_tx_switch
 411 *
 412 * @return int
 413 */
 414
 415int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 416                    struct qed_ptt *p_ptt,
 417                    struct qed_tunnel_info *p_tunn,
 418                    bool allow_npar_tx_switch);
 419
 420/**
 421 * @brief qed_sp_pf_update - PF Function Update Ramrod
 422 *
 423 * This ramrod updates function-related parameters. Every parameter can be
 424 * updated independently, according to configuration flags.
 425 *
 426 * @param p_hwfn
 427 *
 428 * @return int
 429 */
 430
 431int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
 432
 433/**
 434 * @brief qed_sp_pf_update_stag - Update firmware of new outer tag
 435 *
 436 * @param p_hwfn
 437 *
 438 * @return int
 439 */
 440int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
 441
 442/**
 443 * @brief qed_sp_pf_stop - PF Function Stop Ramrod
 444 *
 445 * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
 446 * sent and the last completion written to the PFs Event Ring. This ramrod also
 447 * deletes the context for the Slowhwfn connection on this PF.
 448 *
 449 * @note Not required for first packet.
 450 *
 451 * @param p_hwfn
 452 *
 453 * @return int
 454 */
 455
 456/**
 457 * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod
 458 *
 459 * @param p_hwfn
 460 *
 461 * @return int
 462 */
 463int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);
 464
 465int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
 466
 467int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
 468                              struct qed_ptt *p_ptt,
 469                              struct qed_tunnel_info *p_tunn,
 470                              enum spq_mode comp_mode,
 471                              struct qed_spq_comp_cb *p_comp_data);
 472/**
 473 * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
 474 *
 475 * @param p_hwfn
 476 *
 477 * @return int
 478 */
 479
 480int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
 481
 482#endif
 483