linux/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/kfifo.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "user.h"

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
        if (c4iw_debug) \
                printk(MOD fmt, ## args); \
} while (0)
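
/*
 * PDBG() is the driver's conditional debug printk: it logs with the
 * "iw_cxgb4:" prefix only while the global c4iw_debug flag is nonzero.
 * A minimal usage sketch (illustrative, not from the original source):
 *
 *      PDBG("%s qpid %u\n", __func__, qpid);
 */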

#include "t4.h"

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
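
/*
 * PBL_OFF()/RQT_OFF() convert an absolute adapter memory address 'a'
 * into an offset from the start of the PBL or RQT region advertised by
 * the LLD in lldi.vr, so callers can work in region-relative terms.
 */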

static inline void *cplhdr(struct sk_buff *skb)
{
        return skb->data;
}

struct c4iw_resource {
        struct kfifo tpt_fifo;
        spinlock_t tpt_fifo_lock;
        struct kfifo qid_fifo;
        spinlock_t qid_fifo_lock;
        struct kfifo pdid_fifo;
        spinlock_t pdid_fifo_lock;
};

struct c4iw_qid_list {
        struct list_head entry;
        u32 qid;
};

struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
};

enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1 << 0),
};

struct c4iw_rdev {
        struct c4iw_resource resource;
        unsigned long qpshift;
        u32 qpmask;
        unsigned long cqshift;
        u32 cqmask;
        struct c4iw_dev_ucontext uctx;
        struct gen_pool *pbl_pool;
        struct gen_pool *rqt_pool;
        struct gen_pool *ocqp_pool;
        u32 flags;
        struct cxgb4_lld_info lldi;
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
        return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
        return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}
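
/*
 * The stag region is carved into fixed-size TPT entries; the right
 * shift by 5 assumes each entry occupies 32 bytes of adapter memory
 * (an assumption about the T4 TPT layout, not stated in this header),
 * and the result is clamped to T4_MAX_NUM_STAG.
 */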

#define C4IW_WR_TO (10*HZ)

struct c4iw_wr_wait {
        wait_queue_head_t wait;
        int done;
        int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        wr_waitp->done = 0;
        init_waitqueue_head(&wr_waitp->wait);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                 struct c4iw_wr_wait *wr_waitp,
                                 u32 hwtid, u32 qpid,
                                 const char *func)
{
        unsigned int to = C4IW_WR_TO;

        do {
                wait_event_timeout(wr_waitp->wait, wr_waitp->done, to);
                if (!wr_waitp->done) {
                        printk(KERN_ERR MOD "%s - Device %s not responding - "
                               "tid %u qpid %u\n", func,
                               pci_name(rdev->lldi.pdev), hwtid, qpid);
                        to = to << 2;
                }
        } while (!wr_waitp->done);
        if (wr_waitp->ret)
                printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n",
                       pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
        return wr_waitp->ret;
}
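
/*
 * c4iw_wait_for_reply() blocks until the firmware completion handler
 * sets wr_waitp->done, quadrupling the timeout after each expiry so a
 * wedged device keeps getting reported but is never silently abandoned.
 * A hedged caller sketch (names illustrative, not from this header):
 *
 *      c4iw_init_wr_wait(&wr_wait);
 *      ret = c4iw_ofld_send(rdev, skb);        /- post the WR to the FW -/
 *      if (!ret)
 *              ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, qpid, __func__);
 */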

struct c4iw_dev {
        struct ib_device ibdev;
        struct c4iw_rdev rdev;
        u32 device_cap_flags;
        struct idr cqidr;
        struct idr qpidr;
        struct idr mmidr;
        spinlock_t lock;
        struct list_head entry;
        struct delayed_work db_drop_task;
        struct dentry *debugfs_root;
        u8 registered;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
        return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
        return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
        return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
        return idr_find(&rhp->mmidr, mmid);
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                void *handle, u32 id)
{
        int ret;
        int newid;

        do {
                if (!idr_pre_get(idr, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock_irq(&rhp->lock);
                ret = idr_get_new_above(idr, handle, id, &newid);
                BUG_ON(newid != id);
                spin_unlock_irq(&rhp->lock);
        } while (ret == -EAGAIN);

        return ret;
}
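
/*
 * The idr tables map hardware IDs back to driver objects so event and
 * interrupt paths can resolve, e.g., a CQE's qpid to its c4iw_qp.  A
 * hedged sketch of the pairing (field names illustrative, not taken
 * from this header):
 *
 *      ret = insert_handle(rhp, &rhp->qpidr, qhp, qpid);
 *      ...
 *      qhp = get_qhp(rhp, qid);        /- lookup from the event path -/
 *      ...
 *      remove_handle(rhp, &rhp->qpidr, qpid);
 */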

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
        spin_lock_irq(&rhp->lock);
        idr_remove(idr, id);
        spin_unlock_irq(&rhp->lock);
}

struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
        struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
        u64 len;
        u64 va_fbo;
        enum fw_ri_mem_perms perms;
        u32 stag;
        u32 pdid;
        u32 qpid;
        u32 pbl_addr;
        u32 pbl_size;
        u32 state:1;
        u32 type:2;
        u32 rsvd:1;
        u32 remote_invalidate_disable:1;
        u32 zbva:1;
        u32 mw_bind_enable:1;
        u32 page_size:5;
};

struct c4iw_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
        struct ib_mw ibmw;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_fr_page_list {
        struct ib_fast_reg_page_list ibpl;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        dma_addr_t dma_addr;
        struct c4iw_dev *dev;
        int size;
};

static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
                                        struct ib_fast_reg_page_list *ibpl)
{
        return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
}

struct c4iw_cq {
        struct ib_cq ibcq;
        struct c4iw_dev *rhp;
        struct t4_cq cq;
        spinlock_t lock;
        atomic_t refcnt;
        wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
        u8 initiator;
        u8 recv_marker_enabled;
        u8 xmit_marker_enabled;
        u8 crc_enabled;
        u8 version;
        u8 p2p_type;
};

struct c4iw_qp_attributes {
        u32 scq;
        u32 rcq;
        u32 sq_num_entries;
        u32 rq_num_entries;
        u32 sq_max_sges;
        u32 sq_max_sges_rdma_write;
        u32 rq_max_sges;
        u32 state;
        u8 enable_rdma_read;
        u8 enable_rdma_write;
        u8 enable_bind;
        u8 enable_mmid0_fastreg;
        u32 max_ord;
        u32 max_ird;
        u32 pd;
        u32 next_state;
        char terminate_buffer[52];
        u32 terminate_msg_len;
        u8 is_terminate_local;
        struct c4iw_mpa_attributes mpa_attr;
        struct c4iw_ep *llp_stream_handle;
};

struct c4iw_qp {
        struct ib_qp ibqp;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
        struct t4_wq wq;
        spinlock_t lock;
        struct mutex mutex;
        atomic_t refcnt;
        wait_queue_head_t wait;
        struct timer_list timer;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
        struct ib_ucontext ibucontext;
        struct c4iw_dev_ucontext uctx;
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
        return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
        u32 key;
        unsigned int len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
                                                u32 key, unsigned int len)
{
        struct list_head *pos, *nxt;
        struct c4iw_mm_entry *mm;

        spin_lock(&ucontext->mmap_lock);
        list_for_each_safe(pos, nxt, &ucontext->mmaps) {
                mm = list_entry(pos, struct c4iw_mm_entry, entry);
                if (mm->key == key && mm->len == len) {
                        list_del_init(&mm->entry);
                        spin_unlock(&ucontext->mmap_lock);
                        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
                             key, (unsigned long long)mm->addr, mm->len);
                        return mm;
                }
        }
        spin_unlock(&ucontext->mmap_lock);
        return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
                               struct c4iw_mm_entry *mm)
{
        spin_lock(&ucontext->mmap_lock);
        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
             mm->key, (unsigned long long)mm->addr, mm->len);
        list_add_tail(&mm->entry, &ucontext->mmaps);
        spin_unlock(&ucontext->mmap_lock);
}
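
/*
 * The mmap list implements a key rendezvous between the verb handlers
 * and the user library's later mmap() call: create_qp/create_cq stash a
 * {key, addr, len} triple with insert_mmap(), and the driver's mmap
 * handler claims it with remove_mmap() using the file offset as the
 * key.  A hedged sketch of the producer side (names illustrative):
 *
 *      mm->key = uresp.key;    /- key handed back to userspace -/
 *      mm->addr = virt_to_phys(queue_memory);
 *      mm->len = PAGE_ALIGN(memsize);
 *      insert_mmap(ucontext, mm);
 */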

enum c4iw_qp_attr_mask {
        C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
        C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
        C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
        C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
        C4IW_QP_ATTR_MAX_ORD = 1 << 11,
        C4IW_QP_ATTR_MAX_IRD = 1 << 12,
        C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
        C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
        C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
        C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
        C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                                     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                                     C4IW_QP_ATTR_MAX_ORD |
                                     C4IW_QP_ATTR_MAX_IRD |
                                     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
                                     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
                                     C4IW_QP_ATTR_MPA_ATTR |
                                     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
                   struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs,
                   int internal);
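
/*
 * The mask selects which fields of c4iw_qp_attributes the call
 * consumes.  A hedged example of driving a state transition
 * (illustrative only, not from the original source):
 *
 *      struct c4iw_qp_attributes attrs;
 *
 *      attrs.next_state = C4IW_QP_STATE_TERMINATE;
 *      c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 *
 * 'internal' distinguishes driver-initiated transitions from ones
 * requested through the ib_modify_qp() verb.
 */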

enum c4iw_qp_state {
        C4IW_QP_STATE_IDLE,
        C4IW_QP_STATE_RTS,
        C4IW_QP_STATE_ERROR,
        C4IW_QP_STATE_TERMINATE,
        C4IW_QP_STATE_CLOSING,
        C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET:
        case IB_QPS_INIT:
                return C4IW_QP_STATE_IDLE;
        case IB_QPS_RTS:
                return C4IW_QP_STATE_RTS;
        case IB_QPS_SQD:
                return C4IW_QP_STATE_CLOSING;
        case IB_QPS_SQE:
                return C4IW_QP_STATE_TERMINATE;
        case IB_QPS_ERR:
                return C4IW_QP_STATE_ERROR;
        default:
                return -1;
        }
}

static inline u32 c4iw_ib_to_tpt_access(int a)
{
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
               (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
               FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}
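
/*
 * Both converters map ib_verbs access flags onto the firmware's TPT
 * permission bits; local read is always granted for full memory
 * registrations but never implied for window binds.  For example,
 *
 *      c4iw_ib_to_tpt_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ)
 *
 * yields FW_RI_MEM_ACCESS_LOCAL_WRITE | FW_RI_MEM_ACCESS_REM_READ |
 * FW_RI_MEM_ACCESS_LOCAL_READ.
 */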

enum c4iw_mmid_state {
        C4IW_STAG_STATE_VALID,
        C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA    256
#define MPA_REJECT              0x20
#define MPA_CRC                 0x40
#define MPA_MARKERS             0x80
#define MPA_FLAGS_MASK          0xE0

#define c4iw_put_ep(ep) do { \
        PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
             ep, atomic_read(&((ep)->kref.refcount))); \
        WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
        kref_put(&((ep)->kref), _c4iw_free_ep); \
} while (0)

#define c4iw_get_ep(ep) do { \
        PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
             ep, atomic_read(&((ep)->kref.refcount))); \
        kref_get(&((ep)->kref));  \
} while (0)

void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
        u8 key[16];
        u8 flags;
        u8 revision;
        __be16 private_data_size;
        u8 private_data[0];
};

struct terminate_message {
        u8 layer_etype;
        u8 ecode;
        __be16 hdrct_rsvd;
        u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
        LAYER_RDMAP             = 0x00,
        LAYER_DDP               = 0x10,
        LAYER_MPA               = 0x20,
        RDMAP_LOCAL_CATA        = 0x00,
        RDMAP_REMOTE_PROT       = 0x01,
        RDMAP_REMOTE_OP         = 0x02,
        DDP_LOCAL_CATA          = 0x00,
        DDP_TAGGED_ERR          = 0x01,
        DDP_UNTAGGED_ERR        = 0x02,
        DDP_LLP                 = 0x03
};

enum c4iw_rdma_ecodes {
        RDMAP_INV_STAG          = 0x00,
        RDMAP_BASE_BOUNDS       = 0x01,
        RDMAP_ACC_VIOL          = 0x02,
        RDMAP_STAG_NOT_ASSOC    = 0x03,
        RDMAP_TO_WRAP           = 0x04,
        RDMAP_INV_VERS          = 0x05,
        RDMAP_INV_OPCODE        = 0x06,
        RDMAP_STREAM_CATA       = 0x07,
        RDMAP_GLOBAL_CATA       = 0x08,
        RDMAP_CANT_INV_STAG     = 0x09,
        RDMAP_UNSPECIFIED       = 0xff
};

enum c4iw_ddp_ecodes {
        DDPT_INV_STAG           = 0x00,
        DDPT_BASE_BOUNDS        = 0x01,
        DDPT_STAG_NOT_ASSOC     = 0x02,
        DDPT_TO_WRAP            = 0x03,
        DDPT_INV_VERS           = 0x04,
        DDPU_INV_QN             = 0x01,
        DDPU_INV_MSN_NOBUF      = 0x02,
        DDPU_INV_MSN_RANGE      = 0x03,
        DDPU_INV_MO             = 0x04,
        DDPU_MSG_TOOBIG         = 0x05,
        DDPU_INV_VERS           = 0x06
};

enum c4iw_mpa_ecodes {
        MPA_CRC_ERR             = 0x02,
        MPA_MARKER_ERR          = 0x03
};

enum c4iw_ep_state {
        IDLE = 0,
        LISTEN,
        CONNECTING,
        MPA_REQ_WAIT,
        MPA_REQ_SENT,
        MPA_REQ_RCVD,
        MPA_REP_SENT,
        FPDU_MODE,
        ABORTING,
        CLOSING,
        MORIBUND,
        DEAD,
};

enum c4iw_ep_flags {
        PEER_ABORT_IN_PROGRESS  = 0,
        ABORT_REQ_IN_PROGRESS   = 1,
        RELEASE_RESOURCES       = 2,
        CLOSE_SENT              = 3,
};

struct c4iw_ep_common {
        struct iw_cm_id *cm_id;
        struct c4iw_qp *qp;
        struct c4iw_dev *dev;
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
        struct sockaddr_in local_addr;
        struct sockaddr_in remote_addr;
        struct c4iw_wr_wait wr_wait;
        unsigned long flags;
};

struct c4iw_listen_ep {
        struct c4iw_ep_common com;
        unsigned int stid;
        int backlog;
};

struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_ep *parent_ep;
        struct timer_list timer;
        struct list_head entry;
        unsigned int atid;
        u32 hwtid;
        u32 snd_seq;
        u32 rcv_seq;
        struct l2t_entry *l2t;
        struct dst_entry *dst;
        struct sk_buff *mpa_skb;
        struct c4iw_mpa_attributes mpa_attr;
        u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
        unsigned int mpa_pkt_len;
        u32 ird;
        u32 ord;
        u32 smac_idx;
        u32 tx_chan;
        u32 mtu;
        u16 mss;
        u16 emss;
        u16 plen;
        u16 rss_qid;
        u16 txq_idx;
        u16 ctrlq_idx;
        u8 tos;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline int compute_wscale(int win)
{
        int wscale = 0;

        while (wscale < 14 && (65535 << wscale) < win)
                wscale++;
        return wscale;
}
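
/*
 * compute_wscale() returns the smallest TCP window-scale shift that
 * lets the 16-bit window field cover 'win', capped at 14 per RFC 1323.
 * For example, a 1 MB receive window (win = 1048576) needs wscale = 5,
 * since 65535 << 4 = 1048560 still falls short of it.
 */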

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
                     struct l2t_entry *l2t);
u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock);
void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
                 struct ib_mw_bind *mw_bind);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
                                        struct ib_device *device,
                                        int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                               u64 length, u64 virt, int acc,
                               struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
                                     struct ib_phys_buf *buffer_list,
                                     int num_phys_buf,
                                     int acc,
                                     u64 *iova_start);
int c4iw_reregister_phys_mem(struct ib_mr *mr,
                             int mr_rereg_mask,
                             struct ib_pd *pd,
                             struct ib_phys_buf *buffer_list,
                             int num_phys_buf,
                             int acc, u64 *iova_start);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
                             int vector,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct t4_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;

#endif /* __IW_CXGB4_H__ */