linux/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "user.h"

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

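/*
 * PDBG() is compiled in unconditionally but only emits output when the
 * c4iw_debug module parameter is set, e.g.:
 *
 *      PDBG("%s qpid 0x%x\n", __func__, qpid);
 */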
extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
        if (c4iw_debug) \
                printk(MOD fmt, ## args); \
} while (0)

#include "t4.h"

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

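/*
 * CPL messages from the hardware arrive at the head of the skb's linear
 * data, so the CPL header is simply skb->data.
 */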
static inline void *cplhdr(struct sk_buff *skb)
{
        return skb->data;
}

#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the ids returned */
#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */

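/*
 * Bitmap-backed id allocator; the accessors are declared near the end of
 * this header.  A minimal usage sketch:
 *
 *      struct c4iw_id_table tbl;
 *      u32 id;
 *
 *      c4iw_id_table_alloc(&tbl, 0, nr_ids, 0, C4IW_ID_TABLE_F_RANDOM);
 *      id = c4iw_id_alloc(&tbl);
 *      ...
 *      c4iw_id_free(&tbl, id);
 *      c4iw_id_table_free(&tbl);
 */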
struct c4iw_id_table {
        u32 flags;
        u32 start;              /* logical minimal id */
        u32 last;               /* hint for find */
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};

struct c4iw_resource {
        struct c4iw_id_table tpt_table;
        struct c4iw_id_table qid_table;
        struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
        struct list_head entry;
        u32 qid;
};

struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
};

enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1<<0),
        T4_STATUS_PAGE_DISABLED = (1<<1),
};

struct c4iw_stat {
        u64 total;
        u64 cur;
        u64 max;
        u64 fail;
};

struct c4iw_stats {
        struct mutex lock;
        struct c4iw_stat qid;
        struct c4iw_stat pd;
        struct c4iw_stat stag;
        struct c4iw_stat pbl;
        struct c4iw_stat rqt;
        struct c4iw_stat ocqp;
        u64  db_full;
        u64  db_empty;
        u64  db_drop;
        u64  db_state_transitions;
        u64  db_fc_interruptions;
        u64  tcam_full;
        u64  act_ofld_conn_fails;
        u64  pas_ofld_conn_fails;
        u64  neg_adv;
};

struct c4iw_hw_queue {
        int t4_eq_status_entries;
        int t4_max_eq_size;
        int t4_max_iq_size;
        int t4_max_rq_size;
        int t4_max_sq_size;
        int t4_max_qp_depth;
        int t4_max_cq_depth;
        int t4_stat_len;
};

struct wr_log_entry {
        struct timespec post_host_ts;
        struct timespec poll_host_ts;
        u64 post_sge_ts;
        u64 cqe_sge_ts;
        u64 poll_sge_ts;
        u16 qid;
        u16 wr_id;
        u8 opcode;
        u8 valid;
};

struct c4iw_rdev {
        struct c4iw_resource resource;
        u32 qpmask;
        u32 cqmask;
        struct c4iw_dev_ucontext uctx;
        struct gen_pool *pbl_pool;
        struct gen_pool *rqt_pool;
        struct gen_pool *ocqp_pool;
        u32 flags;
        struct cxgb4_lld_info lldi;
        unsigned long bar2_pa;
        void __iomem *bar2_kva;
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
        struct c4iw_stats stats;
        struct c4iw_hw_queue hw_queue;
        struct t4_dev_status_page *status_page;
        atomic_t wr_log_idx;
        struct wr_log_entry *wr_log;
        int wr_log_size;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
        return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
        return (int)(rdev->lldi.vr->stag.size >> 5);
}

#define C4IW_WR_TO (60*HZ)

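/*
 * Per-request wait object for work requests that need a firmware reply.
 * The typical pattern (a sketch; the real call sites live in the qp/cq
 * paths) is:
 *
 *      struct c4iw_wr_wait wr_wait;
 *
 *      c4iw_init_wr_wait(&wr_wait);
 *      ... post a WR whose cookie points at &wr_wait ...
 *      ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid, __func__);
 *
 * The CPL reply handler completes the wait via c4iw_wake_up().
 */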
struct c4iw_wr_wait {
        struct completion completion;
        int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
        wr_waitp->ret = ret;
        complete(&wr_waitp->completion);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                 struct c4iw_wr_wait *wr_waitp,
                                 u32 hwtid, u32 qpid,
                                 const char *func)
{
        int ret;

        if (c4iw_fatal_error(rdev)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
        if (!ret) {
                PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
                     func, pci_name(rdev->lldi.pdev), hwtid, qpid);
                rdev->flags |= T4_FATAL_ERROR;
                wr_waitp->ret = -EIO;
        }
out:
        if (wr_waitp->ret)
                PDBG("%s: FW reply %d tid %u qpid %u\n",
                     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
        return wr_waitp->ret;
}

enum db_state {
        NORMAL = 0,
        FLOW_CONTROL = 1,
        RECOVERY = 2,
        STOPPED = 3
};

struct c4iw_dev {
        struct ib_device ibdev;
        struct c4iw_rdev rdev;
        u32 device_cap_flags;
        struct idr cqidr;
        struct idr qpidr;
        struct idr mmidr;
        spinlock_t lock;
        struct mutex db_mutex;
        struct dentry *debugfs_root;
        enum db_state db_state;
        struct idr hwtid_idr;
        struct idr atid_idr;
        struct idr stid_idr;
        struct list_head db_fc_list;
        u32 avail_ird;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
        return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
        return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
        return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
        return idr_find(&rhp->mmidr, mmid);
}

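/*
 * idr_preload(GFP_KERNEL) pre-allocates idr nodes while sleeping is still
 * legal, so the idr_alloc(..., GFP_ATOMIC) that follows under the spinlock
 * does not have to dip into atomic reserves; the [id, id + 1) window pins
 * the allocation to exactly the requested id.
 */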
static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                 void *handle, u32 id, int lock)
{
        int ret;

        if (lock) {
                idr_preload(GFP_KERNEL);
                spin_lock_irq(&rhp->lock);
        }

        ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);

        if (lock) {
                spin_unlock_irq(&rhp->lock);
                idr_preload_end();
        }

        BUG_ON(ret == -ENOSPC);
        return ret < 0 ? ret : 0;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
                                       void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
                                   u32 id, int lock)
{
        if (lock)
                spin_lock_irq(&rhp->lock);
        idr_remove(idr, id);
        if (lock)
                spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
                                         struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 0);
}

extern uint c4iw_max_read_depth;

static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
        return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
}

struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
        struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
        u64 len;
        u64 va_fbo;
        enum fw_ri_mem_perms perms;
        u32 stag;
        u32 pdid;
        u32 qpid;
        u32 pbl_addr;
        u32 pbl_size;
        u32 state:1;
        u32 type:2;
        u32 rsvd:1;
        u32 remote_invaliate_disable:1;
        u32 zbva:1;
        u32 mw_bind_enable:1;
        u32 page_size:5;
};

struct c4iw_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
        u64 *mpl;
        dma_addr_t mpl_addr;
        u32 max_mpl_len;
        u32 mpl_len;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
        struct ib_mw ibmw;
        struct c4iw_dev *rhp;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_cq {
        struct ib_cq ibcq;
        struct c4iw_dev *rhp;
        struct t4_cq cq;
        spinlock_t lock;
        spinlock_t comp_handler_lock;
        atomic_t refcnt;
        wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
        u8 initiator;
        u8 recv_marker_enabled;
        u8 xmit_marker_enabled;
        u8 crc_enabled;
        u8 enhanced_rdma_conn;
        u8 version;
        u8 p2p_type;
};

struct c4iw_qp_attributes {
        u32 scq;
        u32 rcq;
        u32 sq_num_entries;
        u32 rq_num_entries;
        u32 sq_max_sges;
        u32 sq_max_sges_rdma_write;
        u32 rq_max_sges;
        u32 state;
        u8 enable_rdma_read;
        u8 enable_rdma_write;
        u8 enable_bind;
        u8 enable_mmid0_fastreg;
        u32 max_ord;
        u32 max_ird;
        u32 pd;
        u32 next_state;
        char terminate_buffer[52];
        u32 terminate_msg_len;
        u8 is_terminate_local;
        struct c4iw_mpa_attributes mpa_attr;
        struct c4iw_ep *llp_stream_handle;
        u8 layer_etype;
        u8 ecode;
        u16 sq_db_inc;
        u16 rq_db_inc;
        u8 send_term;
};

struct c4iw_qp {
        struct ib_qp ibqp;
        struct list_head db_fc_entry;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
        struct t4_wq wq;
        spinlock_t lock;
        struct mutex mutex;
        atomic_t refcnt;
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
        struct completion rq_drained;
        struct completion sq_drained;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
        struct ib_ucontext ibucontext;
        struct c4iw_dev_ucontext uctx;
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
        return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
        u32 key;
        unsigned len;
};

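/*
 * Queue and doorbell memory handed to user space is rendezvoused through
 * (key, len) cookies: the verbs path registers a c4iw_mm_entry with
 * insert_mmap() and returns the key to the library, and the driver's mmap
 * handler later claims the entry with remove_mmap().
 */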
static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
                                                u32 key, unsigned len)
{
        struct list_head *pos, *nxt;
        struct c4iw_mm_entry *mm;

        spin_lock(&ucontext->mmap_lock);
        list_for_each_safe(pos, nxt, &ucontext->mmaps) {
                mm = list_entry(pos, struct c4iw_mm_entry, entry);
                if (mm->key == key && mm->len == len) {
                        list_del_init(&mm->entry);
                        spin_unlock(&ucontext->mmap_lock);
                        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
                             key, (unsigned long long) mm->addr, mm->len);
                        return mm;
                }
        }
        spin_unlock(&ucontext->mmap_lock);
        return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
                               struct c4iw_mm_entry *mm)
{
        spin_lock(&ucontext->mmap_lock);
        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
             mm->key, (unsigned long long) mm->addr, mm->len);
        list_add_tail(&mm->entry, &ucontext->mmaps);
        spin_unlock(&ucontext->mmap_lock);
}

enum c4iw_qp_attr_mask {
        C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
        C4IW_QP_ATTR_SQ_DB = 1 << 1,
        C4IW_QP_ATTR_RQ_DB = 1 << 2,
        C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
        C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
        C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
        C4IW_QP_ATTR_MAX_ORD = 1 << 11,
        C4IW_QP_ATTR_MAX_IRD = 1 << 12,
        C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
        C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
        C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
        C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
        C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                                     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                                     C4IW_QP_ATTR_MAX_ORD |
                                     C4IW_QP_ATTR_MAX_IRD |
                                     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
                                     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
                                     C4IW_QP_ATTR_MPA_ATTR |
                                     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
                                struct c4iw_qp *qhp,
                                enum c4iw_qp_attr_mask mask,
                                struct c4iw_qp_attributes *attrs,
                                int internal);
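
/*
 * A sketch of a driver-internal state transition (masks vary by caller):
 *
 *      struct c4iw_qp_attributes attrs = {
 *              .next_state = C4IW_QP_STATE_RTS,
 *      };
 *
 *      ret = c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 *
 * A non-zero "internal" marks transitions initiated by the driver itself
 * rather than through the verbs API.
 */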

enum c4iw_qp_state {
        C4IW_QP_STATE_IDLE,
        C4IW_QP_STATE_RTS,
        C4IW_QP_STATE_ERROR,
        C4IW_QP_STATE_TERMINATE,
        C4IW_QP_STATE_CLOSING,
        C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET:
        case IB_QPS_INIT:
                return C4IW_QP_STATE_IDLE;
        case IB_QPS_RTS:
                return C4IW_QP_STATE_RTS;
        case IB_QPS_SQD:
                return C4IW_QP_STATE_CLOSING;
        case IB_QPS_SQE:
                return C4IW_QP_STATE_TERMINATE;
        case IB_QPS_ERR:
                return C4IW_QP_STATE_ERROR;
        default:
                return -1;
        }
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
        switch (c4iw_qp_state) {
        case C4IW_QP_STATE_IDLE:
                return IB_QPS_INIT;
        case C4IW_QP_STATE_RTS:
                return IB_QPS_RTS;
        case C4IW_QP_STATE_CLOSING:
                return IB_QPS_SQD;
        case C4IW_QP_STATE_TERMINATE:
                return IB_QPS_SQE;
        case C4IW_QP_STATE_ERROR:
                return IB_QPS_ERR;
        }
        return IB_QPS_ERR;
}

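/*
 * Translate IB verbs access flags into firmware TPT (memory region)
 * permission bits.  Local read access is always granted.
 */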
static inline u32 c4iw_ib_to_tpt_access(int a)
{
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
               (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
               FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}

enum c4iw_mmid_state {
        C4IW_STAG_STATE_VALID,
        C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA    256
#define MPA_ENHANCED_RDMA_CONN  0x10
#define MPA_REJECT              0x20
#define MPA_CRC                 0x40
#define MPA_MARKERS             0x80
#define MPA_FLAGS_MASK          0xE0

#define MPA_V2_PEER2PEER_MODEL          0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR        0x4000
#define MPA_V2_RDMA_WRITE_RTR           0x8000
#define MPA_V2_RDMA_READ_RTR            0x4000
#define MPA_V2_IRD_ORD_MASK             0x3FFF

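/*
 * Endpoint reference helpers.  These are macros rather than inline
 * functions so that __func__ and __LINE__ in the PDBG output identify
 * the call site.
 */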
#define c4iw_put_ep(ep) { \
        PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
             ep, atomic_read(&((ep)->kref.refcount))); \
        WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
        kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
        PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
             ep, atomic_read(&((ep)->kref.refcount))); \
        kref_get(&((ep)->kref));  \
}
void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
        u8 key[16];
        u8 flags;
        u8 revision;
        __be16 private_data_size;
        u8 private_data[0];
};

struct mpa_v2_conn_params {
        __be16 ird;
        __be16 ord;
};

struct terminate_message {
        u8 layer_etype;
        u8 ecode;
        __be16 hdrct_rsvd;
        u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
        LAYER_RDMAP             = 0x00,
        LAYER_DDP               = 0x10,
        LAYER_MPA               = 0x20,
        RDMAP_LOCAL_CATA        = 0x00,
        RDMAP_REMOTE_PROT       = 0x01,
        RDMAP_REMOTE_OP         = 0x02,
        DDP_LOCAL_CATA          = 0x00,
        DDP_TAGGED_ERR          = 0x01,
        DDP_UNTAGGED_ERR        = 0x02,
        DDP_LLP                 = 0x03
};

enum c4iw_rdma_ecodes {
        RDMAP_INV_STAG          = 0x00,
        RDMAP_BASE_BOUNDS       = 0x01,
        RDMAP_ACC_VIOL          = 0x02,
        RDMAP_STAG_NOT_ASSOC    = 0x03,
        RDMAP_TO_WRAP           = 0x04,
        RDMAP_INV_VERS          = 0x05,
        RDMAP_INV_OPCODE        = 0x06,
        RDMAP_STREAM_CATA       = 0x07,
        RDMAP_GLOBAL_CATA       = 0x08,
        RDMAP_CANT_INV_STAG     = 0x09,
        RDMAP_UNSPECIFIED       = 0xff
};

enum c4iw_ddp_ecodes {
        DDPT_INV_STAG           = 0x00,
        DDPT_BASE_BOUNDS        = 0x01,
        DDPT_STAG_NOT_ASSOC     = 0x02,
        DDPT_TO_WRAP            = 0x03,
        DDPT_INV_VERS           = 0x04,
        DDPU_INV_QN             = 0x01,
        DDPU_INV_MSN_NOBUF      = 0x02,
        DDPU_INV_MSN_RANGE      = 0x03,
        DDPU_INV_MO             = 0x04,
        DDPU_MSG_TOOBIG         = 0x05,
        DDPU_INV_VERS           = 0x06
};

enum c4iw_mpa_ecodes {
        MPA_CRC_ERR             = 0x02,
        MPA_MARKER_ERR          = 0x03,
        MPA_LOCAL_CATA          = 0x05,
        MPA_INSUFF_IRD          = 0x06,
        MPA_NOMATCH_RTR         = 0x07,
};

enum c4iw_ep_state {
        IDLE = 0,
        LISTEN,
        CONNECTING,
        MPA_REQ_WAIT,
        MPA_REQ_SENT,
        MPA_REQ_RCVD,
        MPA_REP_SENT,
        FPDU_MODE,
        ABORTING,
        CLOSING,
        MORIBUND,
        DEAD,
};

enum c4iw_ep_flags {
        PEER_ABORT_IN_PROGRESS  = 0,
        ABORT_REQ_IN_PROGRESS   = 1,
        RELEASE_RESOURCES       = 2,
        CLOSE_SENT              = 3,
        TIMEOUT                 = 4,
        QP_REFERENCED           = 5,
};

enum c4iw_ep_history {
        ACT_OPEN_REQ            = 0,
        ACT_OFLD_CONN           = 1,
        ACT_OPEN_RPL            = 2,
        ACT_ESTAB               = 3,
        PASS_ACCEPT_REQ         = 4,
        PASS_ESTAB              = 5,
        ABORT_UPCALL            = 6,
        ESTAB_UPCALL            = 7,
        CLOSE_UPCALL            = 8,
        ULP_ACCEPT              = 9,
        ULP_REJECT              = 10,
        TIMEDOUT                = 11,
        PEER_ABORT              = 12,
        PEER_CLOSE              = 13,
        CONNREQ_UPCALL          = 14,
        ABORT_CONN              = 15,
        DISCONN_UPCALL          = 16,
        EP_DISC_CLOSE           = 17,
        EP_DISC_ABORT           = 18,
        CONN_RPL_UPCALL         = 19,
        ACT_RETRY_NOMEM         = 20,
        ACT_RETRY_INUSE         = 21
};

struct c4iw_ep_common {
        struct iw_cm_id *cm_id;
        struct c4iw_qp *qp;
        struct c4iw_dev *dev;
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
        struct sockaddr_storage local_addr;
        struct sockaddr_storage remote_addr;
        struct c4iw_wr_wait wr_wait;
        unsigned long flags;
        unsigned long history;
};

struct c4iw_listen_ep {
        struct c4iw_ep_common com;
        unsigned int stid;
        int backlog;
};

struct c4iw_ep_stats {
        unsigned connect_neg_adv;
        unsigned abort_neg_adv;
};

struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_ep *parent_ep;
        struct timer_list timer;
        struct list_head entry;
        unsigned int atid;
        u32 hwtid;
        u32 snd_seq;
        u32 rcv_seq;
        struct l2t_entry *l2t;
        struct dst_entry *dst;
        struct sk_buff *mpa_skb;
        struct c4iw_mpa_attributes mpa_attr;
        u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
        unsigned int mpa_pkt_len;
        u32 ird;
        u32 ord;
        u32 smac_idx;
        u32 tx_chan;
        u32 mtu;
        u16 mss;
        u16 emss;
        u16 plen;
        u16 rss_qid;
        u16 txq_idx;
        u16 ctrlq_idx;
        u8 tos;
        u8 retry_with_mpa_v1;
        u8 tried_with_mpa_v1;
        unsigned int retry_count;
        int snd_win;
        int rcv_win;
        struct c4iw_ep_stats stats;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

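/*
 * Smallest TCP window-scale shift (capped at 14, per RFC 7323) that lets
 * the 16-bit window field cover "win".  For example, a 1 MiB window needs
 * wscale = 5, since 65535 << 4 is still less than 1048576.
 */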
static inline int compute_wscale(int win)
{
        int wscale = 0;

        while (wscale < 14 && (65535<<wscale) < win)
                wscale++;
        return wscale;
}

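/*
 * On-chip queue memory relies on write-combining mappings of adapter
 * memory, which this driver only wires up on x86 and 64-bit PowerPC;
 * other architectures report OCQP as unavailable even if the adapter
 * reserved on-chip queue space.
 */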
static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
        return infop->vr->ocq.size > 0;
#else
        return 0;
#endif
}

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
                        u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
                     struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
                   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
                            enum ib_mr_type mr_type,
                            u32 max_num_sg);
int c4iw_map_mr_sg(struct ib_mr *ibmr,
                   struct scatterlist *sg,
                   int sg_nents);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                            struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                                           u64 length, u64 virt, int acc,
                                           struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                 int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct c4iw_cq *chp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
                              enum cxgb4_bar2_qtype qtype,
                              unsigned int *pbar2_qid, u64 *pbar2_pa);
extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_drain_rq(struct ib_qp *qp);
void c4iw_drain_sq(struct ib_qp *qp);

#endif