linux/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched/mm.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include <rdma/cxgb4-abi.h>

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "t4.h"

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
        return skb->data;
}

#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the id's returned */
#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */

struct c4iw_id_table {
        u32 flags;
        u32 start;              /* logical minimal id */
        u32 last;               /* hint for find */
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};

struct c4iw_resource {
        struct c4iw_id_table tpt_table;
        struct c4iw_id_table qid_table;
        struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
        struct list_head entry;
        u32 qid;
};

struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
        struct kref kref;
};

enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1<<0),
        T4_STATUS_PAGE_DISABLED = (1<<1),
};

struct c4iw_stat {
        u64 total;
        u64 cur;
        u64 max;
        u64 fail;
};

struct c4iw_stats {
        struct mutex lock;
        struct c4iw_stat qid;
        struct c4iw_stat pd;
        struct c4iw_stat stag;
        struct c4iw_stat pbl;
        struct c4iw_stat rqt;
        struct c4iw_stat ocqp;
        u64  db_full;
        u64  db_empty;
        u64  db_drop;
        u64  db_state_transitions;
        u64  db_fc_interruptions;
        u64  tcam_full;
        u64  act_ofld_conn_fails;
        u64  pas_ofld_conn_fails;
        u64  neg_adv;
};

struct c4iw_hw_queue {
        int t4_eq_status_entries;
        int t4_max_eq_size;
        int t4_max_iq_size;
        int t4_max_rq_size;
        int t4_max_sq_size;
        int t4_max_qp_depth;
        int t4_max_cq_depth;
        int t4_stat_len;
};

struct wr_log_entry {
        struct timespec post_host_ts;
        struct timespec poll_host_ts;
        u64 post_sge_ts;
        u64 cqe_sge_ts;
        u64 poll_sge_ts;
        u16 qid;
        u16 wr_id;
        u8 opcode;
        u8 valid;
};

struct c4iw_rdev {
        struct c4iw_resource resource;
        u32 qpmask;
        u32 cqmask;
        struct c4iw_dev_ucontext uctx;
        struct gen_pool *pbl_pool;
        struct gen_pool *rqt_pool;
        struct gen_pool *ocqp_pool;
        u32 flags;
        struct cxgb4_lld_info lldi;
        unsigned long bar2_pa;
        void __iomem *bar2_kva;
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
        struct c4iw_stats stats;
        struct c4iw_hw_queue hw_queue;
        struct t4_dev_status_page *status_page;
        atomic_t wr_log_idx;
        struct wr_log_entry *wr_log;
        int wr_log_size;
        struct workqueue_struct *free_workq;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
        return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
        return (int)(rdev->lldi.vr->stag.size >> 5);
}

#define C4IW_WR_TO (60*HZ)

struct c4iw_wr_wait {
        struct completion completion;
        int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
        wr_waitp->ret = ret;
        complete(&wr_waitp->completion);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                      struct c4iw_wr_wait *wr_waitp,
                                      u32 hwtid, u32 qpid,
                                      const char *func)
{
        int ret;

        if (c4iw_fatal_error(rdev)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
        if (!ret) {
                pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
                       func, pci_name(rdev->lldi.pdev), hwtid, qpid);
                rdev->flags |= T4_FATAL_ERROR;
                wr_waitp->ret = -EIO;
        }
out:
        if (wr_waitp->ret)
                pr_debug("%s: FW reply %d tid %u qpid %u\n",
                         pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
        return wr_waitp->ret;
}
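
/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * header): firmware commands follow a post-then-wait pattern.  The caller
 * initializes a c4iw_wr_wait, posts a work request, and blocks in
 * c4iw_wait_for_reply(); the matching CPL reply handler completes it via
 * c4iw_wake_up().  post_fw_wr() below is a hypothetical helper.
 *
 *      struct c4iw_wr_wait wr_wait;
 *      int ret;
 *
 *      c4iw_init_wr_wait(&wr_wait);
 *      ret = post_fw_wr(rdev, skb);    // e.g. built around c4iw_ofld_send()
 *      if (!ret)
 *              ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid,
 *                                        __func__);
 *
 *      // ...meanwhile, in the CPL reply handler:
 *      c4iw_wake_up(wr_waitp, status ? -EIO : 0);
 */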

enum db_state {
        NORMAL = 0,
        FLOW_CONTROL = 1,
        RECOVERY = 2,
        STOPPED = 3
};

struct c4iw_dev {
        struct ib_device ibdev;
        struct c4iw_rdev rdev;
        u32 device_cap_flags;
        struct idr cqidr;
        struct idr qpidr;
        struct idr mmidr;
        spinlock_t lock;
        struct mutex db_mutex;
        struct dentry *debugfs_root;
        enum db_state db_state;
        struct idr hwtid_idr;
        struct idr atid_idr;
        struct idr stid_idr;
        struct list_head db_fc_list;
        u32 avail_ird;
        wait_queue_head_t wait;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
        return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
        return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
        return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
        return idr_find(&rhp->mmidr, mmid);
}

static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                 void *handle, u32 id, int lock)
{
        int ret;

        if (lock) {
                idr_preload(GFP_KERNEL);
                spin_lock_irq(&rhp->lock);
        }

        ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);

        if (lock) {
                spin_unlock_irq(&rhp->lock);
                idr_preload_end();
        }

        BUG_ON(ret == -ENOSPC);
        return ret < 0 ? ret : 0;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
                                void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
                                       void *handle, u32 id)
{
        return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
                                  u32 id, int lock)
{
        if (lock)
                spin_lock_irq(&rhp->lock);
        idr_remove(idr, id);
        if (lock)
                spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
                                        struct idr *idr, u32 id)
{
        _remove_handle(rhp, idr, id, 0);
}
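
/*
 * Illustrative usage sketch (editorial addition): objects are registered in
 * the per-device IDRs keyed by their hardware id, so event handlers can map
 * a CQID/QPID taken from a CQE back to the driver structure.  A create path
 * might look like:
 *
 *      ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 *      ...
 *      qhp = get_qhp(rhp, qpid);       // lookup from an event handler
 *      ...
 *      remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);   // destroy path
 */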

extern uint c4iw_max_read_depth;

static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
        return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
}

struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
        struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
        u64 len;
        u64 va_fbo;
        enum fw_ri_mem_perms perms;
        u32 stag;
        u32 pdid;
        u32 qpid;
        u32 pbl_addr;
        u32 pbl_size;
        u32 state:1;
        u32 type:2;
        u32 rsvd:1;
        u32 remote_invalidate_disable:1;
        u32 zbva:1;
        u32 mw_bind_enable:1;
        u32 page_size:5;
};

struct c4iw_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct c4iw_dev *rhp;
        struct sk_buff *dereg_skb;
        u64 kva;
        struct tpt_attributes attr;
        u64 *mpl;
        dma_addr_t mpl_addr;
        u32 max_mpl_len;
        u32 mpl_len;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
        struct ib_mw ibmw;
        struct c4iw_dev *rhp;
        struct sk_buff *dereg_skb;
        u64 kva;
        struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_cq {
        struct ib_cq ibcq;
        struct c4iw_dev *rhp;
        struct sk_buff *destroy_skb;
        struct t4_cq cq;
        spinlock_t lock;
        spinlock_t comp_handler_lock;
        atomic_t refcnt;
        wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
        u8 initiator;
        u8 recv_marker_enabled;
        u8 xmit_marker_enabled;
        u8 crc_enabled;
        u8 enhanced_rdma_conn;
        u8 version;
        u8 p2p_type;
};

struct c4iw_qp_attributes {
        u32 scq;
        u32 rcq;
        u32 sq_num_entries;
        u32 rq_num_entries;
        u32 sq_max_sges;
        u32 sq_max_sges_rdma_write;
        u32 rq_max_sges;
        u32 state;
        u8 enable_rdma_read;
        u8 enable_rdma_write;
        u8 enable_bind;
        u8 enable_mmid0_fastreg;
        u32 max_ord;
        u32 max_ird;
        u32 pd;
        u32 next_state;
        char terminate_buffer[52];
        u32 terminate_msg_len;
        u8 is_terminate_local;
        struct c4iw_mpa_attributes mpa_attr;
        struct c4iw_ep *llp_stream_handle;
        u8 layer_etype;
        u8 ecode;
        u16 sq_db_inc;
        u16 rq_db_inc;
        u8 send_term;
};

struct c4iw_qp {
        struct ib_qp ibqp;
        struct list_head db_fc_entry;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
        struct t4_wq wq;
        spinlock_t lock;
        struct mutex mutex;
        struct kref kref;
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
        struct work_struct free_work;
        struct c4iw_ucontext *ucontext;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
        struct ib_ucontext ibucontext;
        struct c4iw_dev_ucontext uctx;
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
        struct kref kref;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
        return container_of(c, struct c4iw_ucontext, ibucontext);
}

void _c4iw_free_ucontext(struct kref *kref);

static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
{
        kref_put(&ucontext->kref, _c4iw_free_ucontext);
}

static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
{
        kref_get(&ucontext->kref);
}

struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
        u32 key;
        unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
                                                u32 key, unsigned len)
{
        struct list_head *pos, *nxt;
        struct c4iw_mm_entry *mm;

        spin_lock(&ucontext->mmap_lock);
        list_for_each_safe(pos, nxt, &ucontext->mmaps) {

                mm = list_entry(pos, struct c4iw_mm_entry, entry);
                if (mm->key == key && mm->len == len) {
                        list_del_init(&mm->entry);
                        spin_unlock(&ucontext->mmap_lock);
                        pr_debug("%s key 0x%x addr 0x%llx len %d\n",
                                 __func__, key,
                                 (unsigned long long)mm->addr, mm->len);
                        return mm;
                }
        }
        spin_unlock(&ucontext->mmap_lock);
        return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
                               struct c4iw_mm_entry *mm)
{
        spin_lock(&ucontext->mmap_lock);
        pr_debug("%s key 0x%x addr 0x%llx len %d\n",
                 __func__, mm->key, (unsigned long long)mm->addr, mm->len);
        list_add_tail(&mm->entry, &ucontext->mmaps);
        spin_unlock(&ucontext->mmap_lock);
}
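
/*
 * Illustrative usage sketch (editorial addition): kernel/user mmap
 * rendezvous.  At create time the driver picks a unique key, records the
 * address backing the queue or doorbell under that key, and returns the key
 * to userspace, which then mmap()s the device file at that offset; the
 * driver's .mmap handler claims the entry.  The exact addr computation
 * varies by object and is only sketched here.
 *
 *      mm->key = uresp.key;                    // key handed to userspace
 *      mm->addr = virt_to_phys(chp->cq.queue); // region backing the mmap
 *      mm->len = PAGE_SIZE;
 *      insert_mmap(ucontext, mm);
 *
 *      // ...later, in the driver's .mmap callback:
 *      mm = remove_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
 */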

enum c4iw_qp_attr_mask {
        C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
        C4IW_QP_ATTR_SQ_DB = 1<<1,
        C4IW_QP_ATTR_RQ_DB = 1<<2,
        C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
        C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
        C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
        C4IW_QP_ATTR_MAX_ORD = 1 << 11,
        C4IW_QP_ATTR_MAX_IRD = 1 << 12,
        C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
        C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
        C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
        C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
        C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                                     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                                     C4IW_QP_ATTR_MAX_ORD |
                                     C4IW_QP_ATTR_MAX_IRD |
                                     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
                                     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
                                     C4IW_QP_ATTR_MPA_ATTR |
                                     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
                                struct c4iw_qp *qhp,
                                enum c4iw_qp_attr_mask mask,
                                struct c4iw_qp_attributes *attrs,
                                int internal);

enum c4iw_qp_state {
        C4IW_QP_STATE_IDLE,
        C4IW_QP_STATE_RTS,
        C4IW_QP_STATE_ERROR,
        C4IW_QP_STATE_TERMINATE,
        C4IW_QP_STATE_CLOSING,
        C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET:
        case IB_QPS_INIT:
                return C4IW_QP_STATE_IDLE;
        case IB_QPS_RTS:
                return C4IW_QP_STATE_RTS;
        case IB_QPS_SQD:
                return C4IW_QP_STATE_CLOSING;
        case IB_QPS_SQE:
                return C4IW_QP_STATE_TERMINATE;
        case IB_QPS_ERR:
                return C4IW_QP_STATE_ERROR;
        default:
                return -1;
        }
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
        switch (c4iw_qp_state) {
        case C4IW_QP_STATE_IDLE:
                return IB_QPS_INIT;
        case C4IW_QP_STATE_RTS:
                return IB_QPS_RTS;
        case C4IW_QP_STATE_CLOSING:
                return IB_QPS_SQD;
        case C4IW_QP_STATE_TERMINATE:
                return IB_QPS_SQE;
        case C4IW_QP_STATE_ERROR:
                return IB_QPS_ERR;
        }
        return IB_QPS_ERR;
}
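
/*
 * Note (editorial addition): the two mappings above are not strict
 * inverses.  Both IB_QPS_RESET and IB_QPS_INIT collapse to
 * C4IW_QP_STATE_IDLE, which to_ib_qp_state() reports back as IB_QPS_INIT,
 * so a round trip can turn IB_QPS_RESET into IB_QPS_INIT:
 *
 *      to_ib_qp_state(c4iw_convert_state(IB_QPS_RESET)) == IB_QPS_INIT
 */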

#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN

static inline u32 c4iw_ib_to_tpt_access(int a)
{
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
               (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
               FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}
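
/*
 * Worked example (editorial addition): local read permission is always
 * granted by c4iw_ib_to_tpt_access(), so registering with
 *
 *      acc = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 *
 * yields FW_RI_MEM_ACCESS_LOCAL_READ | FW_RI_MEM_ACCESS_LOCAL_WRITE |
 * FW_RI_MEM_ACCESS_REM_WRITE.  The bind variant translates only the two
 * remote bits and never grants local access.
 */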

enum c4iw_mmid_state {
        C4IW_STAG_STATE_VALID,
        C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA    256
#define MPA_ENHANCED_RDMA_CONN  0x10
#define MPA_REJECT              0x20
#define MPA_CRC                 0x40
#define MPA_MARKERS             0x80
#define MPA_FLAGS_MASK          0xE0

#define MPA_V2_PEER2PEER_MODEL          0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR        0x4000
#define MPA_V2_RDMA_WRITE_RTR           0x8000
#define MPA_V2_RDMA_READ_RTR            0x4000
#define MPA_V2_IRD_ORD_MASK             0x3FFF

#define c4iw_put_ep(ep) {                                               \
        pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n",                \
                 __func__, __LINE__,                                    \
                 ep, kref_read(&((ep)->kref)));                         \
        WARN_ON(kref_read(&((ep)->kref)) < 1);                          \
        kref_put(&((ep)->kref), _c4iw_free_ep);                         \
}

#define c4iw_get_ep(ep) {                                               \
        pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n",               \
                 __func__, __LINE__,                                    \
                 ep, kref_read(&((ep)->kref)));                         \
        kref_get(&((ep)->kref));                                        \
}
void _c4iw_free_ep(struct kref *kref);
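
/*
 * Illustrative usage sketch (editorial addition): every path that stashes
 * an ep pointer (timer, CPL handler context, cm_id->provider_data) takes a
 * reference first and drops it when done; _c4iw_free_ep() runs once the
 * last reference is gone.
 *
 *      c4iw_get_ep(&ep->com);          // e.g. before arming ep->timer
 *      ...
 *      c4iw_put_ep(&ep->com);          // in the timeout/close path
 */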

struct mpa_message {
        u8 key[16];
        u8 flags;
        u8 revision;
        __be16 private_data_size;
        u8 private_data[0];
};

struct mpa_v2_conn_params {
        __be16 ird;
        __be16 ord;
};

struct terminate_message {
        u8 layer_etype;
        u8 ecode;
        __be16 hdrct_rsvd;
        u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
        LAYER_RDMAP             = 0x00,
        LAYER_DDP               = 0x10,
        LAYER_MPA               = 0x20,
        RDMAP_LOCAL_CATA        = 0x00,
        RDMAP_REMOTE_PROT       = 0x01,
        RDMAP_REMOTE_OP         = 0x02,
        DDP_LOCAL_CATA          = 0x00,
        DDP_TAGGED_ERR          = 0x01,
        DDP_UNTAGGED_ERR        = 0x02,
        DDP_LLP                 = 0x03
};

enum c4iw_rdma_ecodes {
        RDMAP_INV_STAG          = 0x00,
        RDMAP_BASE_BOUNDS       = 0x01,
        RDMAP_ACC_VIOL          = 0x02,
        RDMAP_STAG_NOT_ASSOC    = 0x03,
        RDMAP_TO_WRAP           = 0x04,
        RDMAP_INV_VERS          = 0x05,
        RDMAP_INV_OPCODE        = 0x06,
        RDMAP_STREAM_CATA       = 0x07,
        RDMAP_GLOBAL_CATA       = 0x08,
        RDMAP_CANT_INV_STAG     = 0x09,
        RDMAP_UNSPECIFIED       = 0xff
};

enum c4iw_ddp_ecodes {
        DDPT_INV_STAG           = 0x00,
        DDPT_BASE_BOUNDS        = 0x01,
        DDPT_STAG_NOT_ASSOC     = 0x02,
        DDPT_TO_WRAP            = 0x03,
        DDPT_INV_VERS           = 0x04,
        DDPU_INV_QN             = 0x01,
        DDPU_INV_MSN_NOBUF      = 0x02,
        DDPU_INV_MSN_RANGE      = 0x03,
        DDPU_INV_MO             = 0x04,
        DDPU_MSG_TOOBIG         = 0x05,
        DDPU_INV_VERS           = 0x06
};

enum c4iw_mpa_ecodes {
        MPA_CRC_ERR             = 0x02,
        MPA_MARKER_ERR          = 0x03,
        MPA_LOCAL_CATA          = 0x05,
        MPA_INSUFF_IRD          = 0x06,
        MPA_NOMATCH_RTR         = 0x07,
};

enum c4iw_ep_state {
        IDLE = 0,
        LISTEN,
        CONNECTING,
        MPA_REQ_WAIT,
        MPA_REQ_SENT,
        MPA_REQ_RCVD,
        MPA_REP_SENT,
        FPDU_MODE,
        ABORTING,
        CLOSING,
        MORIBUND,
        DEAD,
};

enum c4iw_ep_flags {
        PEER_ABORT_IN_PROGRESS  = 0,
        ABORT_REQ_IN_PROGRESS   = 1,
        RELEASE_RESOURCES       = 2,
        CLOSE_SENT              = 3,
        TIMEOUT                 = 4,
        QP_REFERENCED           = 5,
        STOP_MPA_TIMER          = 7,
};

enum c4iw_ep_history {
        ACT_OPEN_REQ            = 0,
        ACT_OFLD_CONN           = 1,
        ACT_OPEN_RPL            = 2,
        ACT_ESTAB               = 3,
        PASS_ACCEPT_REQ         = 4,
        PASS_ESTAB              = 5,
        ABORT_UPCALL            = 6,
        ESTAB_UPCALL            = 7,
        CLOSE_UPCALL            = 8,
        ULP_ACCEPT              = 9,
        ULP_REJECT              = 10,
        TIMEDOUT                = 11,
        PEER_ABORT              = 12,
        PEER_CLOSE              = 13,
        CONNREQ_UPCALL          = 14,
        ABORT_CONN              = 15,
        DISCONN_UPCALL          = 16,
        EP_DISC_CLOSE           = 17,
        EP_DISC_ABORT           = 18,
        CONN_RPL_UPCALL         = 19,
        ACT_RETRY_NOMEM         = 20,
        ACT_RETRY_INUSE         = 21,
        CLOSE_CON_RPL           = 22,
        EP_DISC_FAIL            = 24,
        QP_REFED                = 25,
        QP_DEREFED              = 26,
        CM_ID_REFED             = 27,
        CM_ID_DEREFED           = 28,
};

enum conn_pre_alloc_buffers {
        CN_ABORT_REQ_BUF,
        CN_ABORT_RPL_BUF,
        CN_CLOSE_CON_REQ_BUF,
        CN_DESTROY_BUF,
        CN_FLOWC_BUF,
        CN_MAX_CON_BUF
};

#define FLOWC_LEN 80
union cpl_wr_size {
        struct cpl_abort_req abrt_req;
        struct cpl_abort_rpl abrt_rpl;
        struct fw_ri_wr ri_req;
        struct cpl_close_con_req close_req;
        char flowc_buf[FLOWC_LEN];
};

struct c4iw_ep_common {
        struct iw_cm_id *cm_id;
        struct c4iw_qp *qp;
        struct c4iw_dev *dev;
        struct sk_buff_head ep_skb_list;
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
        struct sockaddr_storage local_addr;
        struct sockaddr_storage remote_addr;
        struct c4iw_wr_wait wr_wait;
        unsigned long flags;
        unsigned long history;
};

struct c4iw_listen_ep {
        struct c4iw_ep_common com;
        unsigned int stid;
        int backlog;
};

struct c4iw_ep_stats {
        unsigned connect_neg_adv;
        unsigned abort_neg_adv;
};

struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_ep *parent_ep;
        struct timer_list timer;
        struct list_head entry;
        unsigned int atid;
        u32 hwtid;
        u32 snd_seq;
        u32 rcv_seq;
        struct l2t_entry *l2t;
        struct dst_entry *dst;
        struct sk_buff *mpa_skb;
        struct c4iw_mpa_attributes mpa_attr;
        u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
        unsigned int mpa_pkt_len;
        u32 ird;
        u32 ord;
        u32 smac_idx;
        u32 tx_chan;
        u32 mtu;
        u16 mss;
        u16 emss;
        u16 plen;
        u16 rss_qid;
        u16 txq_idx;
        u16 ctrlq_idx;
        u8 tos;
        u8 retry_with_mpa_v1;
        u8 tried_with_mpa_v1;
        unsigned int retry_count;
        int snd_win;
        int rcv_win;
        struct c4iw_ep_stats stats;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
        return infop->vr->ocq.size > 0;
#else
        return 0;
#endif
}

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
                        u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);
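
/*
 * Illustrative usage sketch (editorial addition): these id tables back the
 * allocators in struct c4iw_resource.  A pool is sized once at init and ids
 * are handed out and returned at run time:
 *
 *      err = c4iw_id_table_alloc(&rscp->pdid_table, 0, nr_pdid, 1,
 *                                C4IW_ID_TABLE_F_RANDOM);
 *      ...
 *      pdid = c4iw_id_alloc(&rscp->pdid_table);   // grab an unused id
 *      ...
 *      c4iw_id_free(&rscp->pdid_table, pdid);
 */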

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
                     struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
                            enum ib_mr_type mr_type,
                            u32 max_num_sg);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                   unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                            struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                               u64 length, u64 virt, int acc,
                               struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
                             struct ib_ucontext *ib_context,
                             struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct c4iw_cq *chp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
                              enum cxgb4_bar2_qtype qtype,
                              unsigned int *pbar2_qid, u64 *pbar2_pa);
extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);

#endif