linux/drivers/infiniband/ulp/srp/ib_srp.h
/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_SRP_H
#define IB_SRP_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_fmr_pool.h>

enum {
        SRP_PATH_REC_TIMEOUT_MS = 1000,
        SRP_ABORT_TIMEOUT_MS    = 5000,

        SRP_PORT_REDIRECT       = 1,
        SRP_DLID_REDIRECT       = 2,
        SRP_STALE_CONN          = 3,

        SRP_MAX_LUN             = 512,
        SRP_DEF_SG_TABLESIZE    = 12,

        SRP_DEFAULT_QUEUE_SIZE  = 1 << 6,
        SRP_RSP_SQ_SIZE         = 1,
        SRP_TSK_MGMT_SQ_SIZE    = 1,
        SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
                                  SRP_TSK_MGMT_SQ_SIZE,

        SRP_TAG_NO_REQ          = ~0U,
        SRP_TAG_TSK_MGMT        = 1U << 31,

        SRP_MAX_PAGES_PER_MR    = 512,

        LOCAL_INV_WR_ID_MASK    = 1,
        FAST_REG_WR_ID_MASK     = 2,

        SRP_LAST_WR_ID          = 0xfffffffcU,
};

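/*
 * Illustrative sketch, not part of the upstream header: srp_cmd_sq_size() is
 * a hypothetical helper that mirrors the SRP_DEFAULT_CMD_SQ_SIZE arithmetic
 * above.  One send-queue slot is reserved for responses and one for task
 * management, so with the default queue size of 64 this leaves
 * 64 - 1 - 1 = 62 slots for SCSI commands.
 */
static inline int srp_cmd_sq_size(int queue_size)
{
        /* Slots left for SRP_CMD after reserving response and TMF slots. */
        return queue_size - SRP_RSP_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE;
}
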
enum srp_target_state {
        SRP_TARGET_SCANNING,
        SRP_TARGET_LIVE,
        SRP_TARGET_REMOVED,
};

enum srp_iu_type {
        SRP_IU_CMD,
        SRP_IU_TSK_MGMT,
        SRP_IU_RSP,
};

/*
 * @mr_page_mask: HCA memory registration page mask.
 * @mr_page_size: HCA memory registration page size.
 * @mr_max_size: Maximum size in bytes of a single FMR / FR registration
 *   request.
 */
struct srp_device {
        struct list_head        dev_list;
        struct ib_device       *dev;
        struct ib_pd           *pd;
        struct ib_mr           *mr;
        u64                     mr_page_mask;
        int                     mr_page_size;
        int                     mr_max_size;
        int                     max_pages_per_mr;
        bool                    has_fmr;
        bool                    has_fr;
        bool                    use_fast_reg;
};

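/*
 * Illustrative sketch, not part of the upstream header: mr_page_mask is the
 * HCA registration page mask (typically ~(mr_page_size - 1)), so ANDing a
 * DMA address with it rounds the address down to the start of the
 * registration page.  srp_mr_page_base() is a hypothetical helper name used
 * only for this example.
 */
static inline u64 srp_mr_page_base(struct srp_device *dev, u64 dma_addr)
{
        /* Clear the in-page offset bits of the address. */
        return dma_addr & dev->mr_page_mask;
}
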
struct srp_host {
        struct srp_device      *srp_dev;
        u8                      port;
        struct device           dev;
        struct list_head        target_list;
        spinlock_t              target_lock;
        struct completion       released;
        struct list_head        list;
        struct mutex            add_target_mutex;
};

struct srp_request {
        struct scsi_cmnd       *scmnd;
        struct srp_iu          *cmd;
        union {
                struct ib_pool_fmr **fmr_list;
                struct srp_fr_desc **fr_list;
        };
        u64                    *map_page;
        struct srp_direct_buf  *indirect_desc;
        dma_addr_t              indirect_dma_addr;
        short                   nmdesc;
};

/**
 * struct srp_rdma_ch
 * @comp_vector: Completion vector used by this RDMA channel.
 */
struct srp_rdma_ch {
        /* These are RW in the hot path, and commonly used together */
        struct list_head        free_tx;
        spinlock_t              lock;
        s32                     req_lim;

        /* These are read-only in the hot path */
        struct srp_target_port *target ____cacheline_aligned_in_smp;
        struct ib_cq           *send_cq;
        struct ib_cq           *recv_cq;
        struct ib_qp           *qp;
        union {
                struct ib_fmr_pool     *fmr_pool;
                struct srp_fr_pool     *fr_pool;
        };

        /*
         * Everything above this point is used in the hot path of
         * command processing. Try to keep them packed into cachelines.
         */

        struct completion       done;
        int                     status;

        struct ib_sa_path_rec   path;
        struct ib_sa_query     *path_query;
        int                     path_query_id;

        struct ib_cm_id        *cm_id;
        struct srp_iu         **tx_ring;
        struct srp_iu         **rx_ring;
        struct srp_request     *req_ring;
        int                     max_ti_iu_len;
        int                     comp_vector;

        struct completion       tsk_mgmt_done;
        u8                      tsk_mgmt_status;
};

/**
 * struct srp_target_port
 * @comp_vector: Completion vector used by the first RDMA channel created for
 *   this target port.
 */
struct srp_target_port {
        /* read and written in the hot path */
        spinlock_t              lock;

        /* read only in the hot path */
        struct srp_rdma_ch      *ch;
        u32                     ch_count;
        u32                     lkey;
        u32                     rkey;
        enum srp_target_state   state;
        unsigned int            max_iu_len;
        unsigned int            cmd_sg_cnt;
        unsigned int            indirect_size;
        bool                    allow_ext_sg;

        /* other member variables */
        union ib_gid            sgid;
        __be64                  id_ext;
        __be64                  ioc_guid;
        __be64                  service_id;
        __be64                  initiator_ext;
        u16                     io_class;
        struct srp_host        *srp_host;
        struct Scsi_Host       *scsi_host;
        struct srp_rport       *rport;
        char                    target_name[32];
        unsigned int            scsi_id;
        unsigned int            sg_tablesize;
        int                     queue_size;
        int                     req_ring_size;
        int                     comp_vector;
        int                     tl_retry_count;

        union ib_gid            orig_dgid;
        __be16                  pkey;

        u32                     rq_tmo_jiffies;
        bool                    connected;

        int                     zero_req_lim;

        struct work_struct      tl_err_work;
        struct work_struct      remove_work;

        struct list_head        list;
        bool                    qp_in_error;
};

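/*
 * Illustrative sketch, not part of the upstream header: target->ch points to
 * an array of ch_count RDMA channels, so callers typically select a channel
 * by index.  srp_get_ch() is a hypothetical helper name used only for this
 * example.
 */
static inline struct srp_rdma_ch *srp_get_ch(struct srp_target_port *target,
                                             u32 idx)
{
        /* Bounds-check the index against the number of channels. */
        return idx < target->ch_count ? &target->ch[idx] : NULL;
}
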
struct srp_iu {
        struct list_head        list;
        u64                     dma;
        void                   *buf;
        size_t                  size;
        enum dma_data_direction direction;
};

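/*
 * Illustrative sketch, not part of the upstream header: information units
 * (IUs) that are free for transmission sit on ch->free_tx and are claimed
 * under ch->lock.  srp_claim_tx_iu() is a hypothetical helper showing that
 * pattern; the real driver additionally accounts for req_lim and the IU type.
 */
static inline struct srp_iu *srp_claim_tx_iu(struct srp_rdma_ch *ch)
{
        struct srp_iu *iu = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        if (!list_empty(&ch->free_tx)) {
                /* Take the first free IU off the channel's free list. */
                iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
                list_del(&iu->list);
        }
        spin_unlock_irqrestore(&ch->lock, flags);

        return iu;
}
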
/**
 * struct srp_fr_desc - fast registration work request arguments
 * @entry: Entry in srp_fr_pool.free_list.
 * @mr:    Memory region.
 * @frpl:  Fast registration page list.
 */
struct srp_fr_desc {
        struct list_head                entry;
        struct ib_mr                    *mr;
        struct ib_fast_reg_page_list    *frpl;
};

/**
 * struct srp_fr_pool - pool of fast registration descriptors
 *
 * An entry is available for allocation if and only if it occurs in @free_list.
 *
 * @size:      Number of descriptors in this pool.
 * @max_page_list_len: Maximum fast registration work request page list length.
 * @lock:      Protects free_list.
 * @free_list: List of free descriptors.
 * @desc:      Fast registration descriptor pool.
 */
struct srp_fr_pool {
        int                     size;
        int                     max_page_list_len;
        spinlock_t              lock;
        struct list_head        free_list;
        struct srp_fr_desc      desc[0];
};

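/*
 * Illustrative sketch, not part of the upstream header: because desc[] is a
 * zero-length trailing array, the descriptors live directly behind the fixed
 * part of struct srp_fr_pool in a single allocation.  srp_fr_pool_alloc_size()
 * is a hypothetical helper computing the size such an allocation needs, e.g.
 * kzalloc(srp_fr_pool_alloc_size(n), GFP_KERNEL).
 */
static inline size_t srp_fr_pool_alloc_size(int pool_size)
{
        /* Fixed header followed by pool_size descriptors. */
        return sizeof(struct srp_fr_pool) +
               pool_size * sizeof(struct srp_fr_desc);
}
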
/**
 * struct srp_map_state - per-request DMA memory mapping state
 * @desc:           Pointer to the element of the SRP buffer descriptor array
 *                  that is being filled in.
 * @pages:          Array with DMA addresses of pages being considered for
 *                  memory registration.
 * @base_dma_addr:  DMA address of the first page that has not yet been mapped.
 * @dma_len:        Number of bytes that will be registered with the next
 *                  FMR or FR memory registration call.
 * @total_len:      Total number of bytes in the sg-list being mapped.
 * @npages:         Number of page addresses in the pages[] array.
 * @nmdesc:         Number of FMR or FR memory descriptors used for mapping.
 * @ndesc:          Number of SRP buffer descriptors that have been filled in.
 * @unmapped_sg:    First element of the sg-list that is mapped via FMR or FR.
 * @unmapped_index: Index of the first element mapped via FMR or FR.
 * @unmapped_addr:  DMA address of the first element mapped via FMR or FR.
 */
struct srp_map_state {
        union {
                struct ib_pool_fmr **next_fmr;
                struct srp_fr_desc **next_fr;
        };
        struct srp_direct_buf  *desc;
        u64                    *pages;
        dma_addr_t              base_dma_addr;
        u32                     dma_len;
        u32                     total_len;
        unsigned int            npages;
        unsigned int            nmdesc;
        unsigned int            ndesc;
        struct scatterlist     *unmapped_sg;
        int                     unmapped_index;
        dma_addr_t              unmapped_addr;
};

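/*
 * Illustrative sketch, not part of the upstream header: a mapping pass starts
 * from a zeroed state with only the output descriptor array and the per-page
 * scratch array filled in; the counters above are then advanced as the
 * sg-list is walked.  srp_map_state_reset() is a hypothetical helper name
 * used only for this example.
 */
static inline void srp_map_state_reset(struct srp_map_state *state,
                                       struct srp_direct_buf *desc,
                                       u64 *pages)
{
        memset(state, 0, sizeof(*state));
        state->desc = desc;     /* next SRP buffer descriptor to fill in */
        state->pages = pages;   /* scratch array for per-page DMA addresses */
}
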
#endif /* IB_SRP_H */