/* linux/drivers/infiniband/hw/cxgb3/iwch_provider.h */
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IWCH_PROVIDER_H__
#define __IWCH_PROVIDER_H__

#include <linux/list.h>
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>
#include <asm/types.h>
#include "t3cdev.h"
#include "iwch.h"
#include "cxio_wr.h"
#include "cxio_hal.h"

/*
 * Protection domain: wraps the generic ib_pd with the adapter-assigned
 * PD id and a back-pointer to the owning device.
 */
struct iwch_pd {
	struct ib_pd ibpd;
	u32 pdid;		/* PD id allocated by the cxio/HW layer */
	struct iwch_dev *rhp;	/* owning device */
};
  49
  50static inline struct iwch_pd *to_iwch_pd(struct ib_pd *ibpd)
  51{
  52        return container_of(ibpd, struct iwch_pd, ibpd);
  53}
  54
/*
 * Software shadow of a TPT (translation protection table) entry
 * describing a memory region or memory window.
 */
struct tpt_attributes {
	u32 stag;		/* STag identifying this entry */
	u32 state:1;		/* valid/invalid — see enum iwch_mmid_state */
	u32 type:2;
	u32 rsvd:1;
	enum tpt_mem_perm perms;
	/*
	 * NOTE(review): "invaliate" is a long-standing typo for
	 * "invalidate"; kept as-is because other files reference this
	 * field name.
	 */
	u32 remote_invaliate_disable:1;
	u32 zbva:1;		/* zero-based virtual addressing */
	u32 mw_bind_enable:1;
	u32 page_size:5;

	u32 pdid;		/* owning protection domain */
	u32 qpid;
	u32 pbl_addr;		/* adapter address of the page list */
	u32 len;
	u64 va_fbo;		/* virtual address or first-byte offset */
	u32 pbl_size;
};
  73
/* Memory region: ib_mr plus the pinned user memory and TPT shadow. */
struct iwch_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;	/* pinned user pages — presumably NULL for
				 * kernel MRs; confirm against callers */
	struct iwch_dev *rhp;	/* owning device */
	u64 kva;		/* kernel virtual address (kernel MRs) */
	struct tpt_attributes attr;
};
  81
/* Opaque handle alias for a memory window. */
typedef struct iwch_mw iwch_mw_handle;
  83
  84static inline struct iwch_mr *to_iwch_mr(struct ib_mr *ibmr)
  85{
  86        return container_of(ibmr, struct iwch_mr, ibmr);
  87}
  88
/* Memory window: ib_mw plus its TPT shadow. */
struct iwch_mw {
	struct ib_mw ibmw;
	struct iwch_dev *rhp;	/* owning device */
	u64 kva;
	struct tpt_attributes attr;
};
  95
  96static inline struct iwch_mw *to_iwch_mw(struct ib_mw *ibmw)
  97{
  98        return container_of(ibmw, struct iwch_mw, ibmw);
  99}
 100
/* Completion queue: HW CQ state plus locking and lifetime tracking. */
struct iwch_cq {
	struct ib_cq ibcq;
	struct iwch_dev *rhp;		/* owning device */
	struct t3_cq cq;		/* low-level cxio CQ */
	spinlock_t lock;
	spinlock_t comp_handler_lock;	/* serializes completion upcalls */
	atomic_t refcnt;
	wait_queue_head_t wait;		/* presumably for refcnt drain on
					 * destroy — confirm at call sites */
	u32 __user *user_rptr_addr;	/* userspace read pointer (user CQs) */
};
 111
 112static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq)
 113{
 114        return container_of(ibcq, struct iwch_cq, ibcq);
 115}
 116
/* Flag bits stored in iwch_qp.flags. */
enum IWCH_QP_FLAGS {
	QP_QUIESCED = 0x01	/* QP has been quiesced — see qp_quiesced() */
};
 120
/* Negotiated MPA (Marker PDU Aligned framing) connection attributes. */
struct iwch_mpa_attributes {
	u8 initiator;
	u8 recv_marker_enabled;
	/*
	 * NOTE(review): the original comment here ("iWARP: enable inbound
	 * Read Resp.") appears copy-pasted from enable_rdma_write; this
	 * flag presumably controls MPA markers on transmit — confirm.
	 */
	u8 xmit_marker_enabled;
	u8 crc_enabled;
	u8 version;	/* 0 or 1 */
};
 128
/*
 * Modifiable/queryable QP attributes, passed to iwch_modify_qp()
 * together with an iwch_qp_attr_mask selecting which members are valid.
 */
struct iwch_qp_attributes {
	u32 scq;			/* send CQ */
	u32 rcq;			/* receive CQ */
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;			/* current iwch_qp_state */
	u8 enable_rdma_read;
	u8 enable_rdma_write;	/* enable inbound Read Resp. */
	u8 enable_bind;
	u8 enable_mmid0_fastreg;	/* Enable STAG0 + Fast-register */
	u32 max_ord;			/* outbound RDMA read depth */
	u32 max_ird;			/* inbound RDMA read depth */
	u32 pd;	/* IN */
	/*
	 * Next QP state. If it specifies the current state, only the
	 * QP attributes will be modified.
	 */
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct iwch_mpa_attributes mpa_attr;	/* IN-OUT */
	struct iwch_ep *llp_stream_handle;
	char *stream_msg_buf;	/* Last stream msg. before Idle -> RTS */
	u32 stream_msg_buf_len;	/* Only on Idle -> RTS */
};
 158
/* Queue pair: HW work queue plus connection and lifetime state. */
struct iwch_qp {
	struct ib_qp ibqp;
	struct iwch_dev *rhp;		/* owning device */
	struct iwch_ep *ep;		/* connection endpoint */
	struct iwch_qp_attributes attr;
	struct t3_wq wq;		/* low-level cxio work queue */
	spinlock_t lock;
	atomic_t refcnt;		/* see iwch_qp_add_ref/iwch_qp_rem_ref */
	wait_queue_head_t wait;
	enum IWCH_QP_FLAGS flags;	/* e.g. QP_QUIESCED */
	struct timer_list timer;
};
 171
 172static inline int qp_quiesced(struct iwch_qp *qhp)
 173{
 174        return qhp->flags & QP_QUIESCED;
 175}
 176
 177static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
 178{
 179        return container_of(ibqp, struct iwch_qp, ibqp);
 180}
 181
/* Take / drop a reference on the iwch_qp underlying @qp. */
void iwch_qp_add_ref(struct ib_qp *qp);
void iwch_qp_rem_ref(struct ib_qp *qp);
 184
/* Per-user-process context: cxio state plus pending-mmap bookkeeping. */
struct iwch_ucontext {
	struct ib_ucontext ibucontext;
	struct cxio_ucontext uctx;
	u32 key;		/* presumably the next mmap key to hand
				 * out — confirm against users */
	spinlock_t mmap_lock;	/* protects mmaps list */
	struct list_head mmaps;	/* list of struct iwch_mm_entry */
};
 192
 193static inline struct iwch_ucontext *to_iwch_ucontext(struct ib_ucontext *c)
 194{
 195        return container_of(c, struct iwch_ucontext, ibucontext);
 196}
 197
/*
 * Pending mmap descriptor: a (key, len) pair handed to userspace,
 * matched against a later mmap() call to recover @addr.
 */
struct iwch_mm_entry {
	struct list_head entry;	/* link in iwch_ucontext.mmaps */
	u64 addr;
	u32 key;
	unsigned len;
};
 204
 205static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
 206                                                u32 key, unsigned len)
 207{
 208        struct list_head *pos, *nxt;
 209        struct iwch_mm_entry *mm;
 210
 211        spin_lock(&ucontext->mmap_lock);
 212        list_for_each_safe(pos, nxt, &ucontext->mmaps) {
 213
 214                mm = list_entry(pos, struct iwch_mm_entry, entry);
 215                if (mm->key == key && mm->len == len) {
 216                        list_del_init(&mm->entry);
 217                        spin_unlock(&ucontext->mmap_lock);
 218                        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
 219                             key, (unsigned long long) mm->addr, mm->len);
 220                        return mm;
 221                }
 222        }
 223        spin_unlock(&ucontext->mmap_lock);
 224        return NULL;
 225}
 226
 227static inline void insert_mmap(struct iwch_ucontext *ucontext,
 228                               struct iwch_mm_entry *mm)
 229{
 230        spin_lock(&ucontext->mmap_lock);
 231        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
 232             mm->key, (unsigned long long) mm->addr, mm->len);
 233        list_add_tail(&mm->entry, &ucontext->mmaps);
 234        spin_unlock(&ucontext->mmap_lock);
 235}
 236
/*
 * Bit mask selecting which iwch_qp_attributes members are valid in an
 * iwch_modify_qp() call.
 */
enum iwch_qp_attr_mask {
	IWCH_QP_ATTR_NEXT_STATE = 1 << 0,
	IWCH_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	IWCH_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	IWCH_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	IWCH_QP_ATTR_MAX_ORD = 1 << 11,
	IWCH_QP_ATTR_MAX_IRD = 1 << 12,
	IWCH_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	IWCH_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	IWCH_QP_ATTR_MPA_ATTR = 1 << 24,
	IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	/* Union of the attributes a modify call may legally change. */
	IWCH_QP_ATTR_VALID_MODIFY = (IWCH_QP_ATTR_ENABLE_RDMA_READ |
				     IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
				     IWCH_QP_ATTR_MAX_ORD |
				     IWCH_QP_ATTR_MAX_IRD |
				     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
				     IWCH_QP_ATTR_STREAM_MSG_BUFFER |
				     IWCH_QP_ATTR_MPA_ATTR |
				     IWCH_QP_ATTR_QP_CONTEXT_ACTIVATE)
};
 257
/*
 * Modify the QP attributes selected by @mask.
 * NOTE(review): @internal presumably distinguishes driver-internal
 * transitions from consumer-requested ones — confirm at call sites.
 */
int iwch_modify_qp(struct iwch_dev *rhp,
				struct iwch_qp *qhp,
				enum iwch_qp_attr_mask mask,
				struct iwch_qp_attributes *attrs,
				int internal);
 263
/* iWARP QP states used by the QP state machine (see iwch_convert_state). */
enum iwch_qp_state {
	IWCH_QP_STATE_IDLE,
	IWCH_QP_STATE_RTS,
	IWCH_QP_STATE_ERROR,
	IWCH_QP_STATE_TERMINATE,
	IWCH_QP_STATE_CLOSING,
	IWCH_QP_STATE_TOT	/* number of states, not a real state */
};
 272
 273static inline int iwch_convert_state(enum ib_qp_state ib_state)
 274{
 275        switch (ib_state) {
 276        case IB_QPS_RESET:
 277        case IB_QPS_INIT:
 278                return IWCH_QP_STATE_IDLE;
 279        case IB_QPS_RTS:
 280                return IWCH_QP_STATE_RTS;
 281        case IB_QPS_SQD:
 282                return IWCH_QP_STATE_CLOSING;
 283        case IB_QPS_SQE:
 284                return IWCH_QP_STATE_TERMINATE;
 285        case IB_QPS_ERR:
 286                return IWCH_QP_STATE_ERROR;
 287        default:
 288                return -1;
 289        }
 290}
 291
 292static inline u32 iwch_ib_to_tpt_access(int acc)
 293{
 294        return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
 295               (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
 296               (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
 297               (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
 298               TPT_LOCAL_READ;
 299}
 300
 301static inline u32 iwch_ib_to_tpt_bind_access(int acc)
 302{
 303        return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
 304               (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
 305}
 306
/* Validity state of an MR/MW STag (tpt_attributes.state). */
enum iwch_mmid_state {
	IWCH_STAG_STATE_VALID,
	IWCH_STAG_STATE_INVALID
};
 311
/* Flags controlling QP query behavior. */
enum iwch_qp_query_flags {
	IWCH_QP_QUERY_CONTEXT_NONE = 0x0,	/* No ctx; Only attrs */
	IWCH_QP_QUERY_CONTEXT_GET = 0x1,	/* Get ctx + attrs */
	IWCH_QP_QUERY_CONTEXT_SUSPEND = 0x2,	/* Not Supported */

	/*
	 * Quiesce QP context; Consumer
	 * will NOT replay outstanding WR
	 */
	IWCH_QP_QUERY_CONTEXT_QUIESCE = 0x4,
	IWCH_QP_QUERY_CONTEXT_REMOVE = 0x8,
	/*
	 * NOTE(review): 0x32 breaks the one-bit-per-flag pattern
	 * (0x10/0x20 would be expected); it overlaps CONTEXT_SUSPEND.
	 * Looks deliberate for a test hook, but confirm before using
	 * bitwise tests against it.
	 */
	IWCH_QP_QUERY_TEST_USERWRITE = 0x32	/* Test special */
};
 325
/* Count of RQEs currently posted on @qhp (per name — defined elsewhere). */
u16 iwch_rqes_posted(struct iwch_qp *qhp);
/*
 * Post send/receive work requests; on failure *bad_wr points at the
 * first WR that could not be posted.
 */
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
/* Bind a memory window per @mw_bind. */
int iwch_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind);
/* Poll up to @num_entries completions into @wc. */
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
/* Post a TERMINATE message in response to @rsp_msg. */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
/* Post a zero-byte RDMA read on @ep's QP. */
int iwch_post_zb_read(struct iwch_ep *ep);
/* Register/unregister the device with the RDMA core. */
int iwch_register_device(struct iwch_dev *dev);
void iwch_unregister_device(struct iwch_dev *dev);
void stop_read_rep_timer(struct iwch_qp *qhp);
/* Memory-region registration helpers (PBL = physical buffer list). */
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
		      struct iwch_mr *mhp, int shift);
int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
					struct iwch_mr *mhp,
					int shift,
					int npages);
int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
void iwch_free_pbl(struct iwch_mr *mhp);
int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
/*
 * Build a page list from a physical buffer list for phys-MR
 * registration; outputs total size, page count and page shift.
 */
int build_phys_page_list(struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					u64 *iova_start,
					u64 *total_size,
					int *npages,
					int *shift,
					__be64 **page_list);
 356
 357
/* Node description string exported through the ib_device. */
#define IWCH_NODE_DESC "cxgb3 Chelsio Communications"

#endif /* __IWCH_PROVIDER_H__ */
 361