/* linux/drivers/infiniband/hw/cxgb3/iwch.h */
   1/*
   2 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32#ifndef __IWCH_H__
  33#define __IWCH_H__
  34
  35#include <linux/mutex.h>
  36#include <linux/list.h>
  37#include <linux/spinlock.h>
  38#include <linux/idr.h>
  39#include <linux/workqueue.h>
  40
  41#include <rdma/ib_verbs.h>
  42
  43#include "cxio_hal.h"
  44#include "cxgb3_offload.h"
  45
  46struct iwch_pd;
  47struct iwch_cq;
  48struct iwch_qp;
  49struct iwch_mr;
  50
/*
 * Capability/limit attributes of the RNIC, as advertised to the verbs
 * layer.  NOTE(review): presumably populated once at device-attach time
 * from adapter parameters — confirm against iwch.c/iwch_provider.c.
 */
struct iwch_rnic_attributes {
	u32 max_qps;
	u32 max_wrs;				/* Max for any SQ/RQ */
	u32 max_sge_per_wr;
	u32 max_sge_per_rdma_write_wr;	/* for RDMA Write WR */
	u32 max_cqs;
	u32 max_cqes_per_cq;
	u32 max_mem_regs;
	u32 max_phys_buf_entries;		/* for phys buf list */
	u32 max_pds;

	/*
	 * The memory page sizes supported by this RNIC.
	 * Bit position i in bitmap indicates page of
	 * size (4k)^i.  Phys block list mode unsupported.
	 */
	u32 mem_pgsizes_bitmask;
	u64 max_mr_size;
	u8 can_resize_wq;			/* boolean capability flag */

	/*
	 * The maximum number of RDMA Reads that can be outstanding
	 * per QP with this RNIC as the target.
	 */
	u32 max_rdma_reads_per_qp;

	/*
	 * The maximum number of resources used for RDMA Reads
	 * by this RNIC with this RNIC as the target.
	 */
	u32 max_rdma_read_resources;

	/*
	 * The max depth per QP for initiation of RDMA Read
	 * by this RNIC.
	 */
	u32 max_rdma_read_qp_depth;

	/*
	 * The maximum depth for initiation of RDMA Read
	 * operations by this RNIC on all QPs
	 */
	u32 max_rdma_read_depth;
	u8 rq_overflow_handled;		/* boolean: RQ overflow handled by HW */
	u32 can_modify_ird;
	u32 can_modify_ord;
	u32 max_mem_windows;
	u32 stag0_value;
	u8 zbva_support;			/* zero-based virtual addressing */
	u8 local_invalidate_fence;
	u32 cq_overflow_detection;
};
 103
/*
 * Per-adapter iWARP device state.  Embeds the RDMA core's ib_device and
 * the low-level cxio rdev, plus idr tables that map hardware CQ/QP/STag
 * ids back to the driver objects (see get_chp()/get_qhp()/get_mhp()).
 */
struct iwch_dev {
	struct ib_device ibdev;		/* RDMA core device; see to_iwch_dev() */
	struct cxio_rdev rdev;		/* low-level HAL state; see rdev_to_iwch_dev() */
	u32 device_cap_flags;
	struct iwch_rnic_attributes attr;	/* RNIC limits/capabilities */
	struct idr cqidr;		/* cqid -> struct iwch_cq */
	struct idr qpidr;		/* qpid -> struct iwch_qp */
	struct idr mmidr;		/* mmid -> struct iwch_mr */
	spinlock_t lock;		/* serializes updates to the three idr tables */
	struct list_head entry;		/* NOTE(review): presumably links this dev into a driver-global list — confirm in iwch.c */
	struct delayed_work db_drop_task;	/* deferred doorbell-drop handling work */
};
 116
 117static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
 118{
 119        return container_of(ibdev, struct iwch_dev, ibdev);
 120}
 121
 122static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev)
 123{
 124        return container_of(rdev, struct iwch_dev, rdev);
 125}
 126
 127static inline int t3b_device(const struct iwch_dev *rhp)
 128{
 129        return rhp->rdev.t3cdev_p->type == T3B;
 130}
 131
 132static inline int t3a_device(const struct iwch_dev *rhp)
 133{
 134        return rhp->rdev.t3cdev_p->type == T3A;
 135}
 136
 137static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
 138{
 139        return idr_find(&rhp->cqidr, cqid);
 140}
 141
 142static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
 143{
 144        return idr_find(&rhp->qpidr, qpid);
 145}
 146
 147static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
 148{
 149        return idr_find(&rhp->mmidr, mmid);
 150}
 151
 152static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
 153                                void *handle, u32 id)
 154{
 155        int ret;
 156
 157        idr_preload(GFP_KERNEL);
 158        spin_lock_irq(&rhp->lock);
 159
 160        ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
 161
 162        spin_unlock_irq(&rhp->lock);
 163        idr_preload_end();
 164
 165        BUG_ON(ret == -ENOSPC);
 166        return ret < 0 ? ret : 0;
 167}
 168
/*
 * Remove the entry stored under @id from @idr.  The table is modified
 * under rhp->lock (IRQ-disabling variant), matching insert_handle().
 */
static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
{
	spin_lock_irq(&rhp->lock);
	idr_remove(idr, id);
	spin_unlock_irq(&rhp->lock);
}
 175
 176extern struct cxgb3_client t3c_client;
 177extern cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
 178extern void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb);
 179
 180#endif
 181