linux/drivers/scsi/csiostor/csio_wr.h
/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CSIO_WR_H__
#define __CSIO_WR_H__

#include <linux/cache.h>

#include "csio_defs.h"
#include "t4fw_api.h"
#include "t4fw_api_stor.h"

/*
 * SGE register field values.
 */
#define X_INGPCIEBOUNDARY_32B		0
#define X_INGPCIEBOUNDARY_64B		1
#define X_INGPCIEBOUNDARY_128B		2
#define X_INGPCIEBOUNDARY_256B		3
#define X_INGPCIEBOUNDARY_512B		4
#define X_INGPCIEBOUNDARY_1024B		5
#define X_INGPCIEBOUNDARY_2048B		6
#define X_INGPCIEBOUNDARY_4096B		7

/* GTS register */
#define X_TIMERREG_COUNTER0		0
#define X_TIMERREG_COUNTER1		1
#define X_TIMERREG_COUNTER2		2
#define X_TIMERREG_COUNTER3		3
#define X_TIMERREG_COUNTER4		4
#define X_TIMERREG_COUNTER5		5
#define X_TIMERREG_RESTART_COUNTER	6
#define X_TIMERREG_UPDATE_CIDX		7

/*
 * Egress Context field values
 */
#define X_FETCHBURSTMIN_16B		0
#define X_FETCHBURSTMIN_32B		1
#define X_FETCHBURSTMIN_64B		2
#define X_FETCHBURSTMIN_128B		3

#define X_FETCHBURSTMAX_64B		0
#define X_FETCHBURSTMAX_128B		1
#define X_FETCHBURSTMAX_256B		2
#define X_FETCHBURSTMAX_512B		3

#define X_HOSTFCMODE_NONE		0
#define X_HOSTFCMODE_INGRESS_QUEUE	1
#define X_HOSTFCMODE_STATUS_PAGE	2
#define X_HOSTFCMODE_BOTH		3

/*
 * Ingress Context field values
 */
#define X_UPDATESCHEDULING_TIMER	0
#define X_UPDATESCHEDULING_COUNTER_OPTTIMER	1

#define X_UPDATEDELIVERY_NONE		0
#define X_UPDATEDELIVERY_INTERRUPT	1
#define X_UPDATEDELIVERY_STATUS_PAGE	2
#define X_UPDATEDELIVERY_BOTH		3

#define X_INTERRUPTDESTINATION_PCIE	0
#define X_INTERRUPTDESTINATION_IQ	1

#define X_RSPD_TYPE_FLBUF		0
#define X_RSPD_TYPE_CPL			1
#define X_RSPD_TYPE_INTR		2

/* WR status is at the same position as retval in a CMD header */
#define csio_wr_status(_wr)		\
		(FW_CMD_RETVAL_G(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
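
/*
 * Usage sketch (illustrative, not taken from the driver sources): a
 * completion path can pull the firmware return value straight out of a
 * received WR and map it to a driver status, e.g.
 *
 *	if (csio_wr_status(wr) != FW_SUCCESS)
 *		ioreq->drv_status = -EIO;
 *
 * FW_SUCCESS comes from the firmware API; the -EIO mapping here is only
 * an example.
 */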

struct csio_hw;

extern int csio_intr_coalesce_cnt;
extern int csio_intr_coalesce_time;

/* Ingress queue params */
struct csio_iq_params {

	uint8_t		iq_start:1;
	uint8_t		iq_stop:1;
	uint8_t		pfn:3;

	uint8_t		vfn;

	uint16_t	physiqid;
	uint16_t	iqid;

	uint16_t	fl0id;
	uint16_t	fl1id;

	uint8_t		viid;

	uint8_t		type;
	uint8_t		iqasynch;
	uint8_t		reserved4;

	uint8_t		iqandst;
	uint8_t		iqanus;
	uint8_t		iqanud;

	uint16_t	iqandstindex;

	uint8_t		iqdroprss;
	uint8_t		iqpciech;
	uint8_t		iqdcaen;

	uint8_t		iqdcacpu;
	uint8_t		iqintcntthresh;
	uint8_t		iqo;

	uint8_t		iqcprio;
	uint8_t		iqesize;

	uint16_t	iqsize;

	uint64_t	iqaddr;

	uint8_t		iqflintiqhsen;
	uint8_t		reserved5;
	uint8_t		iqflintcongen;
	uint8_t		iqflintcngchmap;

	uint32_t	reserved6;

	uint8_t		fl0hostfcmode;
	uint8_t		fl0cprio;
	uint8_t		fl0paden;
	uint8_t		fl0packen;
	uint8_t		fl0congen;
	uint8_t		fl0dcaen;

	uint8_t		fl0dcacpu;
	uint8_t		fl0fbmin;

	uint8_t		fl0fbmax;
	uint8_t		fl0cidxfthresho;
	uint8_t		fl0cidxfthresh;

	uint16_t	fl0size;

	uint64_t	fl0addr;

	uint64_t	reserved7;

	uint8_t		fl1hostfcmode;
	uint8_t		fl1cprio;
	uint8_t		fl1paden;
	uint8_t		fl1packen;
	uint8_t		fl1congen;
	uint8_t		fl1dcaen;

	uint8_t		fl1dcacpu;
	uint8_t		fl1fbmin;

	uint8_t		fl1fbmax;
	uint8_t		fl1cidxfthresho;
	uint8_t		fl1cidxfthresh;

	uint16_t	fl1size;

	uint64_t	fl1addr;
};

/* Egress queue params */
struct csio_eq_params {

	uint8_t		pfn;
	uint8_t		vfn;

	uint8_t		eqstart:1;
	uint8_t		eqstop:1;

	uint16_t	physeqid;
	uint32_t	eqid;

	uint8_t		hostfcmode:2;
	uint8_t		cprio:1;
	uint8_t		pciechn:3;

	uint16_t	iqid;

	uint8_t		dcaen:1;
	uint8_t		dcacpu:5;

	uint8_t		fbmin:3;
	uint8_t		fbmax:3;

	uint8_t		cidxfthresho:1;
	uint8_t		cidxfthresh:3;

	uint16_t	eqsize;

	uint64_t	eqaddr;
};

struct csio_dma_buf {
	struct list_head	list;
	void			*vaddr;		/* Virtual address */
	dma_addr_t		paddr;		/* Physical address */
	uint32_t		len;		/* Buffer size */
};

/* Generic I/O request structure */
struct csio_ioreq {
	struct csio_sm		sm;		/* SM, List
						 * should be the first member
						 */
	int			iq_idx;		/* Ingress queue index */
	int			eq_idx;		/* Egress queue index */
	uint32_t		nsge;		/* Number of SG elements */
	uint32_t		tmo;		/* Driver timeout */
	uint32_t		datadir;	/* Data direction */
	struct csio_dma_buf	dma_buf;	/* Req/resp DMA buffers */
	uint16_t		wr_status;	/* WR completion status */
	int16_t			drv_status;	/* Driver internal status */
	struct csio_lnode	*lnode;		/* Owner lnode */
	struct csio_rnode	*rnode;		/* Src/destination rnode */
	void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *);
						/* completion callback */
	void			*scratch1;	/* Scratch area 1 */
	void			*scratch2;	/* Scratch area 2 */
	struct list_head	gen_list;	/* Any list associated with
						 * this ioreq
						 */
	uint64_t		fw_handle;	/* Unique handle passed
						 * to FW
						 */
	uint8_t			dcopy;		/* Data copy required */
	uint8_t			reserved1;
	uint16_t		reserved2;
	struct completion	cmplobj;	/* ioreq completion object */
} ____cacheline_aligned_in_smp;

/*
 * Egress status page for egress cidx updates
 */
struct csio_qstatus_page {
	__be32 qid;
	__be16 cidx;
	__be16 pidx;
};
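
/*
 * Illustrative sketch: when an egress context is programmed with
 * X_HOSTFCMODE_STATUS_PAGE, the hardware reports its consumer index
 * through this page, and the driver can read it as
 *
 *	hw_cidx = ntohs(stp->cidx);
 *
 * where "stp" points at the queue's status page (its exact placement in
 * the queue memory is not defined by this header).
 */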

enum {
	CSIO_MAX_FLBUF_PER_IQWR = 4,
	CSIO_QCREDIT_SZ  = 64,			/* pidx/cidx increments
						 * in bytes
						 */
	CSIO_MAX_QID = 0xFFFF,
	CSIO_MAX_IQ = 128,

	CSIO_SGE_NTIMERS = 6,
	CSIO_SGE_NCOUNTERS = 4,
	CSIO_SGE_FL_SIZE_REGS = 16,
};
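
/*
 * Queue occupancy is accounted in credits of CSIO_QCREDIT_SZ bytes. A
 * rough sketch of the conversions (illustration only):
 *
 *	q_credits  = q_size_in_bytes / CSIO_QCREDIT_SZ;
 *	wr_credits = DIV_ROUND_UP(wr_len, CSIO_QCREDIT_SZ);
 */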

/* Defines for type */
enum {
	CSIO_EGRESS	= 1,
	CSIO_INGRESS	= 2,
	CSIO_FREELIST	= 3,
};

/*
 * Structure for footer (last 2 flits) of Ingress Queue Entry.
 */
struct csio_iqwr_footer {
	__be32			hdrbuflen_pidx;
	__be32			pldbuflen_qid;
	union {
		u8		type_gen;
		__be64		last_flit;
	} u;
};

#define IQWRF_NEWBUF		(1 << 31)
#define IQWRF_LEN_GET(x)	(((x) >> 0) & 0x7fffffffU)
#define IQWRF_GEN_SHIFT		7
#define IQWRF_TYPE_GET(x)	(((x) >> 4) & 0x3U)
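
/*
 * Sketch of decoding an ingress entry footer with the helpers above
 * (illustrative; the field usage is an assumption based on the names):
 *
 *	len    = ntohl(ftr->pldbuflen_qid);
 *	newbuf = len & IQWRF_NEWBUF;
 *	buflen = IQWRF_LEN_GET(len);
 *	type   = IQWRF_TYPE_GET(ftr->u.type_gen);
 *	gen    = (ftr->u.type_gen >> IQWRF_GEN_SHIFT) & 0x1;
 *
 * "gen" is compared against the queue's genbit to tell valid entries
 * from stale ones.
 */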

/*
 * WR pair:
 * ========
 * A WR can start towards the end of a queue, and then continue at the
 * beginning, since the queue is considered to be circular. This will
 * require a pair of address/len to be passed back to the caller -
 * hence the Work request pair structure.
 */
struct csio_wr_pair {
	void			*addr1;
	uint32_t		size1;
	void			*addr2;
	uint32_t		size2;
};
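
/*
 * A wrapped WR is copied in two pieces; a minimal sketch (assuming "wr"
 * points to a fully built WR of "len" bytes and "wrp" was filled in by
 * csio_wr_get(), declared later in this header):
 *
 *	memcpy(wrp.addr1, wr, wrp.size1);
 *	if (wrp.size2)
 *		memcpy(wrp.addr2, (uint8_t *)wr + wrp.size1, wrp.size2);
 *
 * csio_wr_copy_to_wrp() performs this kind of split copy for callers.
 */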

/*
 * The following structure is used by ingress processing to return the
 * free list buffers to consumers.
 */
struct csio_fl_dma_buf {
	struct csio_dma_buf	flbufs[CSIO_MAX_FLBUF_PER_IQWR];
						/* Freelist DMA buffers */
	int			offset;		/* Offset within the
						 * first FL buf.
						 */
	uint32_t		totlen;		/* Total length */
	uint8_t			defer_free;	/* Free of buffer can be
						 * deferred
						 */
};

/* Data-types */
typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t,
			     struct csio_fl_dma_buf *, void *);
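
/*
 * Example shape of an ingress WR handler matching iq_handler_t (the name
 * "foo_iq_handler" is made up for illustration):
 *
 *	static void foo_iq_handler(struct csio_hw *hw, void *wr,
 *				   uint32_t len,
 *				   struct csio_fl_dma_buf *flb, void *priv);
 *
 * "wr" is the WR just consumed from the ingress queue, "len" its length,
 * and "flb" describes any freelist buffers carrying its payload.
 */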

struct csio_iq {
	uint16_t		iqid;		/* Queue ID */
	uint16_t		physiqid;	/* Physical Queue ID */
	uint16_t		genbit;		/* Generation bit,
						 * initially set to 1
						 */
	int			flq_idx;	/* Freelist queue index */
	iq_handler_t		iq_intx_handler; /* IQ INTx handler routine */
};

struct csio_eq {
	uint16_t		eqid;		/* Qid */
	uint16_t		physeqid;	/* Physical Queue ID */
	uint8_t			wrap[512];	/* Temp area for q-wrap around */
};

struct csio_fl {
	uint16_t		flid;		/* Qid */
	uint16_t		packen;		/* Packing enabled? */
	int			offset;		/* Offset within FL buf */
	int			sreg;		/* Size register */
	struct csio_dma_buf	*bufs;		/* Free list buffer ptr array
						 * indexed using flq->cidx/pidx
						 */
};

struct csio_qstats {
	uint32_t	n_tot_reqs;		/* Total no. of Requests */
	uint32_t	n_tot_rsps;		/* Total no. of responses */
	uint32_t	n_qwrap;		/* Queue wraps */
	uint32_t	n_eq_wr_split;		/* Number of split EQ WRs */
	uint32_t	n_qentry;		/* Queue entry */
	uint32_t	n_qempty;		/* Queue empty */
	uint32_t	n_qfull;		/* Queue fulls */
	uint32_t	n_rsp_unknown;		/* Unknown response type */
	uint32_t	n_stray_comp;		/* Stray completion intr */
	uint32_t	n_flq_refill;		/* Number of FL refills */
};

/* Queue metadata */
struct csio_q {
	uint16_t		type;		/* Type: Ingress/Egress/FL */
	uint16_t		pidx;		/* producer index */
	uint16_t		cidx;		/* consumer index */
	uint16_t		inc_idx;	/* Incremental index */
	uint32_t		wr_sz;		/* Size of all WRs in this q
						 * if fixed
						 */
	void			*vstart;	/* Base virtual address
						 * of queue
						 */
	void			*vwrap;		/* Virtual end address to
						 * wrap around at
						 */
	uint32_t		credits;	/* Size of queue in credits */
	void			*owner;		/* Owner */
	union {					/* Queue contexts */
		struct csio_iq	iq;
		struct csio_eq	eq;
		struct csio_fl	fl;
	} un;

	dma_addr_t		pstart;		/* Base physical address of
						 * queue
						 */
	uint32_t		portid;		/* PCIE Channel */
	uint32_t		size;		/* Size of queue in bytes */
	struct csio_qstats	stats;		/* Statistics */
} ____cacheline_aligned_in_smp;
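
/*
 * pidx and cidx count in credits and wrap at "credits". A sketch of the
 * occupancy arithmetic (illustration only, not lifted from the driver):
 *
 *	used = (q->pidx >= q->cidx) ? (q->pidx - q->cidx)
 *				    : (q->credits - (q->cidx - q->pidx));
 */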

struct csio_sge {
	uint32_t	csio_fl_align;		/* Calculated and cached
						 * for fast path
						 */
	uint32_t	sge_control;		/* padding, boundaries,
						 * lengths, etc.
						 */
	uint32_t	sge_host_page_size;	/* Host page size */
	uint32_t	sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS];
						/* free list buffer sizes */
	uint16_t	timer_val[CSIO_SGE_NTIMERS];
	uint8_t		counter_val[CSIO_SGE_NCOUNTERS];
};

/* Work request module */
struct csio_wrm {
	int			num_q;		/* Number of queues */
	struct csio_q		**q_arr;	/* Array of queue pointers
						 * allocated dynamically
						 * based on configured values
						 */
	uint32_t		fw_iq_start;	/* Start ID of IQ for this fn */
	uint32_t		fw_eq_start;	/* Start ID of EQ for this fn */
	struct csio_q		*intr_map[CSIO_MAX_IQ];
						/* IQ-id to IQ map table. */
	int			free_qidx;	/* queue idx of free queue */
	struct csio_sge		sge;		/* SGE params */
};

#define csio_get_q(__hw, __idx)		((__hw)->wrm.q_arr[__idx])
#define csio_q_type(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->type)
#define csio_q_pidx(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->pidx)
#define csio_q_cidx(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->cidx)
#define csio_q_inc_idx(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->inc_idx)
#define csio_q_vstart(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->vstart)
#define csio_q_pstart(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->pstart)
#define csio_q_size(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->size)
#define csio_q_credits(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->credits)
#define csio_q_portid(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->portid)
#define csio_q_wr_sz(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->wr_sz)
#define csio_q_iqid(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
#define csio_q_physiqid(__hw, __idx)					\
				((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
#define csio_q_iq_flq_idx(__hw, __idx)					\
				((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
#define csio_q_eqid(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid)
#define csio_q_flid(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)

#define csio_q_physeqid(__hw, __idx)					\
				((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid)
#define csio_iq_has_fl(__iq)		((__iq)->un.iq.flq_idx != -1)
#define csio_q_iq_to_flid(__hw, __iq_idx)				\
	csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_idx)]->un.iq.flq_idx)
#define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id)		\
		(__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx)
#define csio_q_eq_wrap(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap)
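
/*
 * The accessors above all index wrm.q_arr by queue index; for example
 * (illustrative only):
 *
 *	if (csio_q_type(hw, idx) == CSIO_INGRESS &&
 *	    csio_iq_has_fl(csio_get_q(hw, idx)))
 *		flid = csio_q_iq_to_flid(hw, idx);
 */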

struct csio_mb;

int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t,
		    uint16_t, void *, uint32_t, int, iq_handler_t);
int csio_wr_iq_create(struct csio_hw *, void *, int,
				uint32_t, uint8_t, bool,
				void (*)(struct csio_hw *, struct csio_mb *));
int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t,
				void (*)(struct csio_hw *, struct csio_mb *));
int csio_wr_destroy_queues(struct csio_hw *, bool cmd);


int csio_wr_get(struct csio_hw *, int, uint32_t,
			  struct csio_wr_pair *);
void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t);
int csio_wr_issue(struct csio_hw *, int, bool);
int csio_wr_process_iq(struct csio_hw *, struct csio_q *,
				 void (*)(struct csio_hw *, void *,
					  uint32_t, struct csio_fl_dma_buf *,
					  void *),
				 void *);
int csio_wr_process_iq_idx(struct csio_hw *, int,
				 void (*)(struct csio_hw *, void *,
					  uint32_t, struct csio_fl_dma_buf *,
					  void *),
				 void *);
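
/*
 * Typical egress submission sequence using the helpers above (a sketch,
 * assuming "wr_buf"/"wr_len" describe a fully built WR and that
 * csio_wr_get() returns 0 on success):
 *
 *	struct csio_wr_pair wrp;
 *
 *	if (csio_wr_get(hw, eq_idx, wr_len, &wrp) == 0) {
 *		csio_wr_copy_to_wrp(wr_buf, &wrp, 0, wr_len);
 *		csio_wr_issue(hw, eq_idx, false);
 *	}
 */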

void csio_wr_sge_init(struct csio_hw *);
int csio_wrm_init(struct csio_wrm *, struct csio_hw *);
void csio_wrm_exit(struct csio_wrm *, struct csio_hw *);

#endif /* ifndef __CSIO_WR_H__ */