linux/drivers/net/ethernet/qlogic/qed/qed_sp.h
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#ifndef _QED_SP_H
#define _QED_SP_H

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"

enum spq_mode {
        QED_SPQ_MODE_BLOCK,     /* Client will poll a designated mem. address */
        QED_SPQ_MODE_CB,        /* Client supplies a callback */
        QED_SPQ_MODE_EBLOCK,    /* QED should block until completion */
};

struct qed_spq_comp_cb {
        void    (*function)(struct qed_hwfn *,
                            void *,
                            union event_ring_data *,
                            u8 fw_return_code);
        void    *cookie;
};
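
/* Illustrative sketch (not part of the driver sources): a completion handler
 * matching the qed_spq_comp_cb signature, for use with QED_SPQ_MODE_CB. The
 * names my_ramrod_done, struct my_ctx and my_context are hypothetical; only
 * the callback prototype itself is taken from this header.
 *
 *      static void my_ramrod_done(struct qed_hwfn *p_hwfn, void *cookie,
 *                                 union event_ring_data *data,
 *                                 u8 fw_return_code)
 *      {
 *              struct my_ctx *ctx = cookie;    // cookie passed at post time
 *
 *              ctx->fw_rc = fw_return_code;    // record the FW result
 *              complete(&ctx->done);           // wake the waiting caller
 *      }
 *
 *      struct qed_spq_comp_cb comp_cb = {
 *              .function = my_ramrod_done,
 *              .cookie = &my_context,
 *      };
 */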

/**
 * @brief qed_eth_cqe_completion - handles the completion of a
 *        ramrod on the cqe ring
 *
 * @param p_hwfn
 * @param cqe
 *
 * @return int
 */
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe);

/**
 *  @file
 *
 *  QED Slow-hwfn queue interface
 */

union ramrod_data {
        struct pf_start_ramrod_data pf_start;
        struct pf_update_ramrod_data pf_update;
        struct rx_queue_start_ramrod_data rx_queue_start;
        struct rx_queue_update_ramrod_data rx_queue_update;
        struct rx_queue_stop_ramrod_data rx_queue_stop;
        struct tx_queue_start_ramrod_data tx_queue_start;
        struct tx_queue_stop_ramrod_data tx_queue_stop;
        struct vport_start_ramrod_data vport_start;
        struct vport_stop_ramrod_data vport_stop;
        struct vport_update_ramrod_data vport_update;
        struct core_rx_start_ramrod_data core_rx_queue_start;
        struct core_rx_stop_ramrod_data core_rx_queue_stop;
        struct core_tx_start_ramrod_data core_tx_queue_start;
        struct core_tx_stop_ramrod_data core_tx_queue_stop;
        struct vport_filter_update_ramrod_data vport_filter_update;

        struct rdma_init_func_ramrod_data rdma_init_func;
        struct rdma_close_func_ramrod_data rdma_close_func;
        struct rdma_register_tid_ramrod_data rdma_register_tid;
        struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
        struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
        struct roce_create_qp_req_ramrod_data roce_create_qp_req;
        struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
        struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
        struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
        struct roce_query_qp_req_ramrod_data roce_query_qp_req;
        struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
        struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
        struct rdma_create_cq_ramrod_data rdma_create_cq;
        struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
        struct rdma_srq_create_ramrod_data rdma_create_srq;
        struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
        struct rdma_srq_modify_ramrod_data rdma_modify_srq;
        struct roce_init_func_ramrod_data roce_init_func;

        struct iscsi_slow_path_hdr iscsi_empty;
        struct iscsi_init_ramrod_params iscsi_init;
        struct iscsi_spe_func_dstry iscsi_destroy;
        struct iscsi_spe_conn_offload iscsi_conn_offload;
        struct iscsi_conn_update_ramrod_params iscsi_conn_update;
        struct iscsi_spe_conn_termination iscsi_conn_terminate;

        struct vf_start_ramrod_data vf_start;
        struct vf_stop_ramrod_data vf_stop;
};

#define EQ_MAX_CREDIT   0xffffffff

enum spq_priority {
        QED_SPQ_PRIORITY_NORMAL,
        QED_SPQ_PRIORITY_HIGH,
};

union qed_spq_req_comp {
        struct qed_spq_comp_cb  cb;
        u64                     *done_addr;
};

struct qed_spq_comp_done {
        u64     done;
        u8      fw_return_code;
};

struct qed_spq_entry {
        struct list_head                list;

        u8                              flags;

        /* HSI slow path element */
        struct slow_path_element        elem;

        union ramrod_data               ramrod;

        enum spq_priority               priority;

        /* pending queue for this entry */
        struct list_head                *queue;

        enum spq_mode                   comp_mode;
        struct qed_spq_comp_cb          comp_cb;
        struct qed_spq_comp_done        comp_done; /* SPQ_MODE_EBLOCK */
};

struct qed_eq {
        struct qed_chain        chain;
        u8                      eq_sb_index;    /* index within the SB */
        __le16                  *p_fw_cons;     /* ptr to index value */
};

struct qed_consq {
        struct qed_chain chain;
};

struct qed_spq {
        spinlock_t              lock; /* SPQ lock */

        struct list_head        unlimited_pending;
        struct list_head        pending;
        struct list_head        completion_pending;
        struct list_head        free_pool;

        struct qed_chain        chain;

        /* allocated dma-able memory for spq entries (+ramrod data) */
        dma_addr_t              p_phys;
        struct qed_spq_entry    *p_virt;

#define SPQ_RING_SIZE \
        (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))

        /* Bitmap for handling out-of-order completions */
        DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
        u8                      comp_bitmap_idx;

        /* Statistics */
        u32                     unlimited_pending_count;
        u32                     normal_count;
        u32                     high_count;
        u32                     comp_sent_count;
        u32                     comp_count;

        u32                     cid;
};

/**
 * @brief qed_spq_post - Posts a Slow hwfn request to FW, or if that is not
 *        currently possible, pends it to be posted later.
 *
 * @param p_hwfn
 * @param p_ent
 * @param fw_return_code
 *
 * @return int
 */
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent,
                 u8 *fw_return_code);

/**
 * @brief qed_spq_alloc - Allocates & initializes the SPQ and EQ.
 *
 * @param p_hwfn
 *
 * @return int
 */
int qed_spq_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_spq_setup - Reset the SPQ to its start state.
 *
 * @param p_hwfn
 */
void qed_spq_setup(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_spq_free - Deallocates the given SPQ struct.
 *
 * @param p_hwfn
 */
void qed_spq_free(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_spq_get_entry - Obtain an entry from the SPQ
 *        free pool list.
 *
 * @param p_hwfn
 * @param pp_ent
 *
 * @return int
 */
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
                  struct qed_spq_entry **pp_ent);

/**
 * @brief qed_spq_return_entry - Return an entry to the SPQ free
 *        pool list.
 *
 * @param p_hwfn
 * @param p_ent
 */
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                          struct qed_spq_entry *p_ent);
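
/* Illustrative sketch (not part of the driver sources): the free-pool API is
 * used in get/return pairs; an entry taken with qed_spq_get_entry() that is
 * never posted should be handed back with qed_spq_return_entry(). The helper
 * fill_ramrod_data() below is hypothetical; only the declarations above are
 * from this header.
 *
 *      struct qed_spq_entry *p_ent = NULL;
 *      int rc;
 *
 *      rc = qed_spq_get_entry(p_hwfn, &p_ent);
 *      if (rc)
 *              return rc;
 *
 *      rc = fill_ramrod_data(p_ent);                   // hypothetical helper
 *      if (rc) {
 *              qed_spq_return_entry(p_hwfn, p_ent);    // undo the get
 *              return rc;
 *      }
 *
 *      return qed_spq_post(p_hwfn, p_ent, NULL);
 */
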
/**
 * @brief qed_eq_alloc - Allocates & initializes an EQ struct
 *
 * @param p_hwfn
 * @param num_elem number of elements in the eq
 *
 * @return struct qed_eq* - a newly allocated structure; NULL upon error.
 */
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
                            u16 num_elem);

/**
 * @brief qed_eq_setup - Reset the EQ to its start state.
 *
 * @param p_hwfn
 * @param p_eq
 */
void qed_eq_setup(struct qed_hwfn *p_hwfn,
                  struct qed_eq *p_eq);

/**
 * @brief qed_eq_free - Deallocates the given EQ struct.
 *
 * @param p_hwfn
 * @param p_eq
 */
void qed_eq_free(struct qed_hwfn *p_hwfn,
                 struct qed_eq *p_eq);

/**
 * @brief qed_eq_prod_update - update the FW with default EQ producer
 *
 * @param p_hwfn
 * @param prod
 */
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
                        u16 prod);

/**
 * @brief qed_eq_completion - Completes currently pending EQ elements
 *
 * @param p_hwfn
 * @param cookie
 *
 * @return int
 */
int qed_eq_completion(struct qed_hwfn *p_hwfn,
                      void *cookie);

/**
 * @brief qed_spq_completion - Completes a single event
 *
 * @param p_hwfn
 * @param echo - echo value from cookie (used for determining completion)
 * @param fw_return_code - return code reported by the FW
 * @param p_data - data from cookie (used in callback function if applicable)
 *
 * @return int
 */
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data);

/**
 * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
 *
 * @param p_hwfn
 *
 * @return u32 - SPQ CID
 */
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_consq_alloc - Allocates & initializes a ConsQ
 *        struct
 *
 * @param p_hwfn
 *
 * @return struct qed_consq* - a newly allocated structure; NULL upon error.
 */
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_consq_setup - Reset the ConsQ to its start
 *        state.
 *
 * @param p_hwfn
 * @param p_consq
 */
void qed_consq_setup(struct qed_hwfn *p_hwfn,
                     struct qed_consq *p_consq);

/**
 * @brief qed_consq_free - Deallocates the given ConsQ struct.
 *
 * @param p_hwfn
 * @param p_consq
 */
void qed_consq_free(struct qed_hwfn *p_hwfn,
                    struct qed_consq *p_consq);

/**
 * @file
 *
 * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
 */

#define QED_SP_EQ_COMPLETION  0x01
#define QED_SP_CQE_COMPLETION 0x02

struct qed_sp_init_data {
        u32                     cid;
        u16                     opaque_fid;

        /* Information regarding operation upon sending & completion */
        enum spq_mode           comp_mode;
        struct qed_spq_comp_cb *p_comp_data;
};

int qed_sp_init_request(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry **pp_ent,
                        u8 cmd,
                        u8 protocol,
                        struct qed_sp_init_data *p_data);
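
/* Illustrative sketch (not part of the driver sources): the usual pattern for
 * sending a ramrod is to fill a qed_sp_init_data, let qed_sp_init_request()
 * acquire and initialize an SPQ entry, and then post it. The command and
 * protocol IDs (COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON) and the
 * p_hwfn->hw_info.opaque_fid field come from other qed headers and are
 * assumptions here, not definitions from this file.
 *
 *      struct qed_spq_entry *p_ent = NULL;
 *      struct qed_sp_init_data init_data;
 *      int rc;
 *
 *      memset(&init_data, 0, sizeof(init_data));
 *      init_data.cid = qed_spq_get_cid(p_hwfn);
 *      init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      init_data.comp_mode = QED_SPQ_MODE_EBLOCK;      // block until FW completes
 *
 *      rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_UPDATE,
 *                               PROTOCOLID_COMMON, &init_data);
 *      if (rc)
 *              return rc;
 *
 *      // p_ent->ramrod.<member> would be filled here before posting
 *      return qed_spq_post(p_hwfn, p_ent, NULL);
 */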

/**
 * @brief qed_sp_pf_start - PF Function Start Ramrod
 *
 * This ramrod is sent to initialize a physical function (PF). It will
 * configure the function related parameters and write its completion to the
 * event ring specified in the parameters.
 *
 * Ramrods complete on the common event ring for the PF. This ring is
 * allocated by the driver on host memory and its parameters are written
 * to the internal RAM of the UStorm by the Function Start Ramrod.
 *
 * @param p_hwfn
 * @param p_tunn
 * @param mode
 * @param allow_npar_tx_switch
 *
 * @return int
 */

int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
                    struct qed_tunn_start_params *p_tunn,
                    enum qed_mf_mode mode, bool allow_npar_tx_switch);
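
/* Illustrative sketch (not part of the driver sources): a PF-start call as it
 * might appear during HW init. The QED_MF_DEFAULT multi-function mode and the
 * contents of the tunnel-parameter struct come from elsewhere in the qed
 * driver and are assumptions here, not definitions from this header.
 *
 *      struct qed_tunn_start_params tunn_info;
 *      int rc;
 *
 *      memset(&tunn_info, 0, sizeof(tunn_info));
 *      rc = qed_sp_pf_start(p_hwfn, &tunn_info, QED_MF_DEFAULT,
 *                           true);     // true = allow_npar_tx_switch
 */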

/**
 * @brief qed_sp_pf_update - PF Function Update Ramrod
 *
 * This ramrod updates function-related parameters. Every parameter can be
 * updated independently, according to configuration flags.
 *
 * @param p_hwfn
 *
 * @return int
 */

int qed_sp_pf_update(struct qed_hwfn *p_hwfn);

/**
 * @brief qed_sp_pf_stop - PF Function Stop Ramrod
 *
 * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
 * sent and the last completion written to the PF's Event Ring. This ramrod
 * also deletes the context for the Slow-hwfn connection on this PF.
 *
 * @note Not required for first packet.
 *
 * @param p_hwfn
 *
 * @return int
 */

int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);

int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
                              struct qed_tunn_update_params *p_tunn,
                              enum spq_mode comp_mode,
                              struct qed_spq_comp_cb *p_comp_data);
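
/* Illustrative sketch (not part of the driver sources): comp_mode and
 * p_comp_data let a caller receive the completion asynchronously instead of
 * blocking. The callback and tunnel parameters below are hypothetical; only
 * qed_sp_pf_update_tunn_cfg() and QED_SPQ_MODE_CB come from this header.
 *
 *      struct qed_spq_comp_cb cb = {
 *              .function = my_tunn_update_done,        // see qed_spq_comp_cb above
 *              .cookie = my_cookie,
 *      };
 *
 *      rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn_params,
 *                                     QED_SPQ_MODE_CB, &cb);
 */
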
/**
 * @brief qed_sp_heartbeat_ramrod - Send an empty (heartbeat) ramrod
 *
 * @param p_hwfn
 *
 * @return int
 */

int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);

#endif