linux/drivers/infiniband/hw/cxgb3/iwch.h
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IWCH_H__
#define __IWCH_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#include <rdma/ib_verbs.h>

#include "cxio_hal.h"
#include "cxgb3_offload.h"

struct iwch_pd;
struct iwch_cq;
struct iwch_qp;
struct iwch_mr;

struct iwch_rnic_attributes {
	u32 max_qps;
	u32 max_wrs;				/* Max for any SQ/RQ */
	u32 max_sge_per_wr;
	u32 max_sge_per_rdma_write_wr;		/* for RDMA Write WR */
	u32 max_cqs;
	u32 max_cqes_per_cq;
	u32 max_mem_regs;
	u32 max_phys_buf_entries;		/* for phys buf list */
	u32 max_pds;

	/*
	 * The memory page sizes supported by this RNIC.
	 * Bit position i in bitmap indicates page of
	 * size (4k)^i.  Phys block list mode unsupported.
	 */
	u32 mem_pgsizes_bitmask;
	u64 max_mr_size;
	u8 can_resize_wq;

	/*
	 * The maximum number of RDMA Reads that can be outstanding
	 * per QP with this RNIC as the target.
	 */
	u32 max_rdma_reads_per_qp;

	/*
	 * The maximum number of resources this RNIC uses for
	 * handling RDMA Reads when it is the target.
	 */
	u32 max_rdma_read_resources;

	/*
	 * The maximum per-QP depth for RDMA Reads initiated
	 * by this RNIC.
	 */
	u32 max_rdma_read_qp_depth;

	/*
	 * The maximum depth for RDMA Read operations initiated
	 * by this RNIC across all QPs.
	 */
	u32 max_rdma_read_depth;
	u8 rq_overflow_handled;
	u32 can_modify_ird;
	u32 can_modify_ord;
	u32 max_mem_windows;
	u32 stag0_value;
	u8 zbva_support;
	u8 local_invalidate_fence;
	u32 cq_overflow_detection;
};
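
/*
 * Illustrative sketch only, not part of the original header: one way a
 * consumer of iwch_rnic_attributes might test a page-size bit in
 * mem_pgsizes_bitmask, following the bit-per-size encoding described in
 * the comment above.  The helper name is hypothetical.
 */
static inline bool iwch_example_pgsize_bit_set(const struct iwch_rnic_attributes *attr,
					       unsigned int bit)
{
	/* A set bit means the corresponding page size is supported. */
	return attr->mem_pgsizes_bitmask & (1U << bit);
}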

struct iwch_dev {
	struct ib_device ibdev;			/* RDMA core device (embedded) */
	struct cxio_rdev rdev;			/* low-level cxio HAL device state */
	u32 device_cap_flags;			/* capabilities reported to the core */
	struct iwch_rnic_attributes attr;	/* adapter limits and features */
	struct xarray cqs;			/* CQs indexed by cqid */
	struct xarray qps;			/* QPs indexed by qpid */
	struct xarray mrs;			/* MRs indexed by mmid */
	struct list_head entry;			/* link in the driver's device list */
	struct delayed_work db_drop_task;	/* delayed doorbell-drop handling */
};
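
/*
 * Illustrative sketch only, not part of the original header: how the
 * xarray lookup tables and the device list linkage above might be set up
 * when an iwch_dev is created.  The function name is hypothetical and
 * XA_FLAGS_LOCK_IRQ is an assumption (entries may be looked up from
 * interrupt context).
 */
static inline void iwch_example_init_lookup_tables(struct iwch_dev *rnicp)
{
	xa_init_flags(&rnicp->cqs, XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&rnicp->qps, XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&rnicp->mrs, XA_FLAGS_LOCK_IRQ);
	INIT_LIST_HEAD(&rnicp->entry);
}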

static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct iwch_dev, ibdev);
}

static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev)
{
	return container_of(rdev, struct iwch_dev, rdev);
}

static inline int t3b_device(const struct iwch_dev *rhp)
{
	return rhp->rdev.t3cdev_p->type == T3B;
}

static inline int t3a_device(const struct iwch_dev *rhp)
{
	return rhp->rdev.t3cdev_p->type == T3A;
}
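
/*
 * Illustrative sketch only, not part of the original header: the revision
 * helpers above can be used to branch on the adapter type, e.g. to pick a
 * human-readable name.  The function is a hypothetical example.
 */
static inline const char *iwch_example_hw_rev_name(const struct iwch_dev *rhp)
{
	if (t3b_device(rhp))
		return "T3B";
	if (t3a_device(rhp))
		return "T3A";
	return "unknown";
}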

static inline struct iwch_cq *get_chp(struct iwch_dev *rhp, u32 cqid)
{
	return xa_load(&rhp->cqs, cqid);
}

static inline struct iwch_qp *get_qhp(struct iwch_dev *rhp, u32 qpid)
{
	return xa_load(&rhp->qps, qpid);
}

static inline struct iwch_mr *get_mhp(struct iwch_dev *rhp, u32 mmid)
{
	return xa_load(&rhp->mrs, mmid);
}
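
/*
 * Illustrative sketch only, not part of the original header: typical use
 * of the lookup helpers above, e.g. resolving a CQ from a hardware CQID
 * and handling the case where nothing is registered under that id.  The
 * function name is hypothetical.
 */
static inline struct iwch_cq *iwch_example_find_cq(struct iwch_dev *rhp, u32 cqid)
{
	struct iwch_cq *chp = get_chp(rhp, cqid);

	/* xa_load() returns NULL when no CQ is stored at this index. */
	if (!chp)
		pr_debug("%s: no CQ registered for cqid 0x%x\n", __func__, cqid);
	return chp;
}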

extern struct cxgb3_client t3c_client;
extern cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
extern void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb);

#endif