linux/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include <rdma/cxgb4-abi.h>

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

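/*
 * Debug logging: PDBG() is always compiled in and gated at runtime by
 * the c4iw_debug module parameter.  An illustrative call site ("qid"
 * is a hypothetical local, not part of this header):
 *
 *	PDBG("%s: posting to qid %u\n", __func__, qid);
 */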
extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
        if (c4iw_debug) \
                printk(MOD fmt, ## args); \
} while (0)

#include "t4.h"

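/*
 * Convert an absolute adapter memory address "a" into an offset
 * relative to the start of the PBL or RQT region that the LLD reports
 * in lldi.vr.
 */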
#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
        return skb->data;
}

#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the ids returned */
#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */

struct c4iw_id_table {
        u32 flags;
        u32 start;              /* lowest logical id */
        u32 last;               /* hint for the next search */
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};

struct c4iw_resource {
        struct c4iw_id_table tpt_table;
        struct c4iw_id_table qid_table;
        struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
        struct list_head entry;
        u32 qid;
};

struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
};

enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1<<0),
        T4_STATUS_PAGE_DISABLED = (1<<1),
};

struct c4iw_stat {
        u64 total;
        u64 cur;
        u64 max;
        u64 fail;
};

struct c4iw_stats {
        struct mutex lock;
        struct c4iw_stat qid;
        struct c4iw_stat pd;
        struct c4iw_stat stag;
        struct c4iw_stat pbl;
        struct c4iw_stat rqt;
        struct c4iw_stat ocqp;
        u64  db_full;
        u64  db_empty;
        u64  db_drop;
        u64  db_state_transitions;
        u64  db_fc_interruptions;
        u64  tcam_full;
        u64  act_ofld_conn_fails;
        u64  pas_ofld_conn_fails;
        u64  neg_adv;
};

struct c4iw_hw_queue {
        int t4_eq_status_entries;
        int t4_max_eq_size;
        int t4_max_iq_size;
        int t4_max_rq_size;
        int t4_max_sq_size;
        int t4_max_qp_depth;
        int t4_max_cq_depth;
        int t4_stat_len;
};

struct wr_log_entry {
        struct timespec post_host_ts;
        struct timespec poll_host_ts;
        u64 post_sge_ts;
        u64 cqe_sge_ts;
        u64 poll_sge_ts;
        u16 qid;
        u16 wr_id;
        u8 opcode;
        u8 valid;
};

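/*
 * Per-adapter RDMA state: resource id tables, gen_pool allocators for
 * PBL/RQT/on-chip queue memory, the LLD handle, BAR2 and on-chip
 * memory window mappings, statistics, and the optional work-request
 * log.
 */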
struct c4iw_rdev {
        struct c4iw_resource resource;
        u32 qpmask;
        u32 cqmask;
        struct c4iw_dev_ucontext uctx;
        struct gen_pool *pbl_pool;
        struct gen_pool *rqt_pool;
        struct gen_pool *ocqp_pool;
        u32 flags;
        struct cxgb4_lld_info lldi;
        unsigned long bar2_pa;
        void __iomem *bar2_kva;
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
        struct c4iw_stats stats;
        struct c4iw_hw_queue hw_queue;
        struct t4_dev_status_page *status_page;
        atomic_t wr_log_idx;
        struct wr_log_entry *wr_log;
        int wr_log_size;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
        return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
        return (int)(rdev->lldi.vr->stag.size >> 5);
}

#define C4IW_WR_TO (60*HZ)

struct c4iw_wr_wait {
        struct completion completion;
        int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
        wr_waitp->ret = ret;
        complete(&wr_waitp->completion);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                 struct c4iw_wr_wait *wr_waitp,
                                 u32 hwtid, u32 qpid,
                                 const char *func)
{
        int ret;

        if (c4iw_fatal_error(rdev)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
        if (!ret) {
                PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
                     func, pci_name(rdev->lldi.pdev), hwtid, qpid);
                rdev->flags |= T4_FATAL_ERROR;
                wr_waitp->ret = -EIO;
        }
out:
        if (wr_waitp->ret)
                PDBG("%s: FW reply %d tid %u qpid %u\n",
                     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
        return wr_waitp->ret;
}
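/*
 * Sketch of the usual firmware-reply pattern (illustrative; "skb"
 * carries a work request built by the caller, and the wait object is
 * referenced by that request so the CPL reply handler can find it and
 * call c4iw_wake_up() with the firmware status):
 *
 *	struct c4iw_wr_wait wr_wait;
 *	int ret;
 *
 *	c4iw_init_wr_wait(&wr_wait);
 *	ret = c4iw_ofld_send(rdev, skb);
 *	if (ret)
 *		return ret;
 *	return c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
 */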

enum db_state {
        NORMAL = 0,
        FLOW_CONTROL = 1,
        RECOVERY = 2,
        STOPPED = 3
};

struct c4iw_dev {
        struct ib_device ibdev;
        struct c4iw_rdev rdev;
        u32 device_cap_flags;
        struct idr cqidr;
        struct idr qpidr;
        struct idr mmidr;
        spinlock_t lock;
        struct mutex db_mutex;
        struct dentry *debugfs_root;
        enum db_state db_state;
        struct idr hwtid_idr;
        struct idr atid_idr;
        struct idr stid_idr;
        struct list_head db_fc_list;
        u32 avail_ird;
        wait_queue_head_t wait;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
        return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
        return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
        return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
        return idr_find(&rhp->mmidr, mmid);
}

static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                 void *handle, u32 id, int lock)
{
        int ret;

        if (lock) {
                idr_preload(GFP_KERNEL);
                spin_lock_irq(&rhp->lock);
        }

        ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);

        if (lock) {
                spin_unlock_irq(&rhp->lock);
                idr_preload_end();
        }

        BUG_ON(ret == -ENOSPC);
        return ret < 0 ? ret : 0;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
                                       void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
                                   u32 id, int lock)
{
        if (lock)
                spin_lock_irq(&rhp->lock);
        idr_remove(idr, id);
        if (lock)
                spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
                                         struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 0);
}
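/*
 * Illustrative handle-table usage (the shape QP setup/teardown takes;
 * "qhp" is a hypothetical struct c4iw_qp pointer):
 *
 *	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 *	...
 *	qhp = get_qhp(rhp, qpid);
 *	...
 *	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 */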

extern uint c4iw_max_read_depth;

static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
        return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
}

struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
        struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
        u64 len;
        u64 va_fbo;
        enum fw_ri_mem_perms perms;
        u32 stag;
        u32 pdid;
        u32 qpid;
        u32 pbl_addr;
        u32 pbl_size;
        u32 state:1;
        u32 type:2;
        u32 rsvd:1;
        u32 remote_invaliate_disable:1;
        u32 zbva:1;
        u32 mw_bind_enable:1;
        u32 page_size:5;
};

struct c4iw_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct c4iw_dev *rhp;
        struct sk_buff *dereg_skb;
        u64 kva;
        struct tpt_attributes attr;
        u64 *mpl;
        dma_addr_t mpl_addr;
        u32 max_mpl_len;
        u32 mpl_len;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
        struct ib_mw ibmw;
        struct c4iw_dev *rhp;
        struct sk_buff *dereg_skb;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_cq {
        struct ib_cq ibcq;
        struct c4iw_dev *rhp;
        struct sk_buff *destroy_skb;
        struct t4_cq cq;
        spinlock_t lock;
        spinlock_t comp_handler_lock;
        atomic_t refcnt;
        wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
        u8 initiator;
        u8 recv_marker_enabled;
        u8 xmit_marker_enabled;
        u8 crc_enabled;
        u8 enhanced_rdma_conn;
        u8 version;
        u8 p2p_type;
};

struct c4iw_qp_attributes {
        u32 scq;
        u32 rcq;
        u32 sq_num_entries;
        u32 rq_num_entries;
        u32 sq_max_sges;
        u32 sq_max_sges_rdma_write;
        u32 rq_max_sges;
        u32 state;
        u8 enable_rdma_read;
        u8 enable_rdma_write;
        u8 enable_bind;
        u8 enable_mmid0_fastreg;
        u32 max_ord;
        u32 max_ird;
        u32 pd;
        u32 next_state;
        char terminate_buffer[52];
        u32 terminate_msg_len;
        u8 is_terminate_local;
        struct c4iw_mpa_attributes mpa_attr;
        struct c4iw_ep *llp_stream_handle;
        u8 layer_etype;
        u8 ecode;
        u16 sq_db_inc;
        u16 rq_db_inc;
        u8 send_term;
};

struct c4iw_qp {
        struct ib_qp ibqp;
        struct list_head db_fc_entry;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
        struct t4_wq wq;
        spinlock_t lock;
        struct mutex mutex;
        struct kref kref;
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
        struct completion rq_drained;
        struct completion sq_drained;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
        struct ib_ucontext ibucontext;
        struct c4iw_dev_ucontext uctx;
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
        return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
        u32 key;
        unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
                                                u32 key, unsigned len)
{
        struct list_head *pos, *nxt;
        struct c4iw_mm_entry *mm;

        spin_lock(&ucontext->mmap_lock);
        list_for_each_safe(pos, nxt, &ucontext->mmaps) {
                mm = list_entry(pos, struct c4iw_mm_entry, entry);
                if (mm->key == key && mm->len == len) {
                        list_del_init(&mm->entry);
                        spin_unlock(&ucontext->mmap_lock);
                        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
                             key, (unsigned long long) mm->addr, mm->len);
                        return mm;
                }
        }
        spin_unlock(&ucontext->mmap_lock);
        return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
                               struct c4iw_mm_entry *mm)
{
        spin_lock(&ucontext->mmap_lock);
        PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
             mm->key, (unsigned long long) mm->addr, mm->len);
        list_add_tail(&mm->entry, &ucontext->mmaps);
        spin_unlock(&ucontext->mmap_lock);
}
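/*
 * Sketch of how these helpers pair up with the driver's mmap handler
 * (illustrative): setup code allocates a c4iw_mm_entry, fills in a
 * unique key and the address backing a queue, and insert_mmap()s it;
 * the mmap() path then claims the entry by key/length and frees it:
 *
 *	mm = remove_mmap(ucontext, key, vma->vm_end - vma->vm_start);
 *	if (!mm)
 *		return -EINVAL;
 *	addr = mm->addr;
 *	kfree(mm);
 */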

enum c4iw_qp_attr_mask {
        C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
        C4IW_QP_ATTR_SQ_DB = 1 << 1,
        C4IW_QP_ATTR_RQ_DB = 1 << 2,
        C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
        C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
        C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
        C4IW_QP_ATTR_MAX_ORD = 1 << 11,
        C4IW_QP_ATTR_MAX_IRD = 1 << 12,
        C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
        C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
        C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
        C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
        C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                                     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                                     C4IW_QP_ATTR_MAX_ORD |
                                     C4IW_QP_ATTR_MAX_IRD |
                                     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
                                     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
                                     C4IW_QP_ATTR_MPA_ATTR |
                                     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs, int internal);

enum c4iw_qp_state {
        C4IW_QP_STATE_IDLE,
        C4IW_QP_STATE_RTS,
        C4IW_QP_STATE_ERROR,
        C4IW_QP_STATE_TERMINATE,
        C4IW_QP_STATE_CLOSING,
        C4IW_QP_STATE_TOT
};

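/*
 * Note: the conversions below are not a perfect round trip.  Both
 * IB_QPS_RESET and IB_QPS_INIT map to C4IW_QP_STATE_IDLE, and
 * to_ib_qp_state() reports IDLE back as IB_QPS_INIT.
 */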
static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET:
        case IB_QPS_INIT:
                return C4IW_QP_STATE_IDLE;
        case IB_QPS_RTS:
                return C4IW_QP_STATE_RTS;
        case IB_QPS_SQD:
                return C4IW_QP_STATE_CLOSING;
        case IB_QPS_SQE:
                return C4IW_QP_STATE_TERMINATE;
        case IB_QPS_ERR:
                return C4IW_QP_STATE_ERROR;
        default:
                return -1;
        }
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
        switch (c4iw_qp_state) {
        case C4IW_QP_STATE_IDLE:
                return IB_QPS_INIT;
        case C4IW_QP_STATE_RTS:
                return IB_QPS_RTS;
        case C4IW_QP_STATE_CLOSING:
                return IB_QPS_SQD;
        case C4IW_QP_STATE_TERMINATE:
                return IB_QPS_SQE;
        case C4IW_QP_STATE_ERROR:
                return IB_QPS_ERR;
        }
        return IB_QPS_ERR;
}

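/*
 * Translate IB access flags into firmware TPT permission bits.
 * Regular MRs are always granted local read; bind access for memory
 * windows carries only the remote permissions.
 */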
static inline u32 c4iw_ib_to_tpt_access(int a)
{
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
               (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
               FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}

enum c4iw_mmid_state {
        C4IW_STAG_STATE_VALID,
        C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA    256
#define MPA_ENHANCED_RDMA_CONN  0x10
#define MPA_REJECT              0x20
#define MPA_CRC                 0x40
#define MPA_MARKERS             0x80
#define MPA_FLAGS_MASK          0xE0

#define MPA_V2_PEER2PEER_MODEL          0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR        0x4000
#define MPA_V2_RDMA_WRITE_RTR           0x8000
#define MPA_V2_RDMA_READ_RTR            0x4000
#define MPA_V2_IRD_ORD_MASK             0x3FFF

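/*
 * Endpoint reference counting: every c4iw_get_ep() must be balanced
 * by a c4iw_put_ep(); the final put frees the ep via _c4iw_free_ep().
 * The PDBG lines record the call site (%s:%u) to help chase refcount
 * leaks.
 */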
#define c4iw_put_ep(ep) { \
        PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
             ep, atomic_read(&((ep)->kref.refcount))); \
        WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
        kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
        PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
             ep, atomic_read(&((ep)->kref.refcount))); \
        kref_get(&((ep)->kref)); \
}
void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
        u8 key[16];
        u8 flags;
        u8 revision;
        __be16 private_data_size;
        u8 private_data[0];
};

struct mpa_v2_conn_params {
        __be16 ird;
        __be16 ord;
};

struct terminate_message {
        u8 layer_etype;
        u8 ecode;
        __be16 hdrct_rsvd;
        u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
        LAYER_RDMAP             = 0x00,
        LAYER_DDP               = 0x10,
        LAYER_MPA               = 0x20,
        RDMAP_LOCAL_CATA        = 0x00,
        RDMAP_REMOTE_PROT       = 0x01,
        RDMAP_REMOTE_OP         = 0x02,
        DDP_LOCAL_CATA          = 0x00,
        DDP_TAGGED_ERR          = 0x01,
        DDP_UNTAGGED_ERR        = 0x02,
        DDP_LLP                 = 0x03
};

enum c4iw_rdma_ecodes {
        RDMAP_INV_STAG          = 0x00,
        RDMAP_BASE_BOUNDS       = 0x01,
        RDMAP_ACC_VIOL          = 0x02,
        RDMAP_STAG_NOT_ASSOC    = 0x03,
        RDMAP_TO_WRAP           = 0x04,
        RDMAP_INV_VERS          = 0x05,
        RDMAP_INV_OPCODE        = 0x06,
        RDMAP_STREAM_CATA       = 0x07,
        RDMAP_GLOBAL_CATA       = 0x08,
        RDMAP_CANT_INV_STAG     = 0x09,
        RDMAP_UNSPECIFIED       = 0xff
};

enum c4iw_ddp_ecodes {
        DDPT_INV_STAG           = 0x00,
        DDPT_BASE_BOUNDS        = 0x01,
        DDPT_STAG_NOT_ASSOC     = 0x02,
        DDPT_TO_WRAP            = 0x03,
        DDPT_INV_VERS           = 0x04,
        DDPU_INV_QN             = 0x01,
        DDPU_INV_MSN_NOBUF      = 0x02,
        DDPU_INV_MSN_RANGE      = 0x03,
        DDPU_INV_MO             = 0x04,
        DDPU_MSG_TOOBIG         = 0x05,
        DDPU_INV_VERS           = 0x06
};

enum c4iw_mpa_ecodes {
        MPA_CRC_ERR             = 0x02,
        MPA_MARKER_ERR          = 0x03,
        MPA_LOCAL_CATA          = 0x05,
        MPA_INSUFF_IRD          = 0x06,
        MPA_NOMATCH_RTR         = 0x07,
};

enum c4iw_ep_state {
        IDLE = 0,
        LISTEN,
        CONNECTING,
        MPA_REQ_WAIT,
        MPA_REQ_SENT,
        MPA_REQ_RCVD,
        MPA_REP_SENT,
        FPDU_MODE,
        ABORTING,
        CLOSING,
        MORIBUND,
        DEAD,
};

enum c4iw_ep_flags {
        PEER_ABORT_IN_PROGRESS  = 0,
        ABORT_REQ_IN_PROGRESS   = 1,
        RELEASE_RESOURCES       = 2,
        CLOSE_SENT              = 3,
        TIMEOUT                 = 4,
        QP_REFERENCED           = 5,
        STOP_MPA_TIMER          = 7,
};

enum c4iw_ep_history {
        ACT_OPEN_REQ            = 0,
        ACT_OFLD_CONN           = 1,
        ACT_OPEN_RPL            = 2,
        ACT_ESTAB               = 3,
        PASS_ACCEPT_REQ         = 4,
        PASS_ESTAB              = 5,
        ABORT_UPCALL            = 6,
        ESTAB_UPCALL            = 7,
        CLOSE_UPCALL            = 8,
        ULP_ACCEPT              = 9,
        ULP_REJECT              = 10,
        TIMEDOUT                = 11,
        PEER_ABORT              = 12,
        PEER_CLOSE              = 13,
        CONNREQ_UPCALL          = 14,
        ABORT_CONN              = 15,
        DISCONN_UPCALL          = 16,
        EP_DISC_CLOSE           = 17,
        EP_DISC_ABORT           = 18,
        CONN_RPL_UPCALL         = 19,
        ACT_RETRY_NOMEM         = 20,
        ACT_RETRY_INUSE         = 21,
        CLOSE_CON_RPL           = 22,
        EP_DISC_FAIL            = 24,
        QP_REFED                = 25,
        QP_DEREFED              = 26,
        CM_ID_REFED             = 27,
        CM_ID_DEREFED           = 28,
};

enum conn_pre_alloc_buffers {
        CN_ABORT_REQ_BUF,
        CN_ABORT_RPL_BUF,
        CN_CLOSE_CON_REQ_BUF,
        CN_DESTROY_BUF,
        CN_FLOWC_BUF,
        CN_MAX_CON_BUF
};

#define FLOWC_LEN 80
union cpl_wr_size {
        struct cpl_abort_req abrt_req;
        struct cpl_abort_rpl abrt_rpl;
        struct fw_ri_wr ri_req;
        struct cpl_close_con_req close_req;
        char flowc_buf[FLOWC_LEN];
};

struct c4iw_ep_common {
        struct iw_cm_id *cm_id;
        struct c4iw_qp *qp;
        struct c4iw_dev *dev;
        struct sk_buff_head ep_skb_list;
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
        struct sockaddr_storage local_addr;
        struct sockaddr_storage remote_addr;
        struct c4iw_wr_wait wr_wait;
        unsigned long flags;
        unsigned long history;
};

struct c4iw_listen_ep {
        struct c4iw_ep_common com;
        unsigned int stid;
        int backlog;
};

struct c4iw_ep_stats {
        unsigned connect_neg_adv;
        unsigned abort_neg_adv;
};

struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_ep *parent_ep;
        struct timer_list timer;
        struct list_head entry;
        unsigned int atid;
        u32 hwtid;
        u32 snd_seq;
        u32 rcv_seq;
        struct l2t_entry *l2t;
        struct dst_entry *dst;
        struct sk_buff *mpa_skb;
        struct c4iw_mpa_attributes mpa_attr;
        u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
        unsigned int mpa_pkt_len;
        u32 ird;
        u32 ord;
        u32 smac_idx;
        u32 tx_chan;
        u32 mtu;
        u16 mss;
        u16 emss;
        u16 plen;
        u16 rss_qid;
        u16 txq_idx;
        u16 ctrlq_idx;
        u8 tos;
        u8 retry_with_mpa_v1;
        u8 tried_with_mpa_v1;
        unsigned int retry_count;
        int snd_win;
        int rcv_win;
        struct c4iw_ep_stats stats;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

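/*
 * On-chip queue (OCQP) support is compiled in only for the
 * architectures below; the likely reason is that the driver needs a
 * write-combining mapping of adapter memory, which it only supports
 * there.
 */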
static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
        return infop->vr->ocq.size > 0;
#else
        return 0;
#endif
}

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
                        u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);
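/*
 * Illustrative id-table setup and use (a sketch of what
 * c4iw_init_resource() does for the TPT table; exact arguments may
 * differ):
 *
 *	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
 *				  C4IW_ID_TABLE_F_RANDOM);
 *	...
 *	stag_idx = c4iw_id_alloc(&rdev->resource.tpt_table);
 *	...
 *	c4iw_id_free(&rdev->resource.tpt_table, stag_idx);
 */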

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
                     struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
                            enum ib_mr_type mr_type,
                            u32 max_num_sg);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                   unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                            struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                               u64 length, u64 virt, int acc,
                               struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct c4iw_cq *chp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
                              enum cxgb4_bar2_qtype qtype,
                              unsigned int *pbar2_qid, u64 *pbar2_pa);
extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_drain_rq(struct ib_qp *qp);
void c4iw_drain_sq(struct ib_qp *qp);
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);

#endif