linux/drivers/infiniband/sw/rxe/rxe_loc.h
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

/* rxe_av.c */
void rxe_init_av(struct rdma_ah_attr *attr, struct rxe_av *av);

int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);

void rxe_av_from_attr(u8 port_num, struct rxe_av *av,
		      struct rdma_ah_attr *attr);

void rxe_av_to_attr(struct rxe_av *av, struct rdma_ah_attr *attr);

void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr);

struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);

/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_disable(struct rxe_cq *cq);

void rxe_cq_cleanup(struct rxe_pool_entry *arg);

/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

/* rxe_mmap.c */
struct rxe_mmap_info {
	struct list_head	pending_mmaps;
	struct ib_ucontext	*context;
	struct kref		ref;
	void			*obj;

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
					   struct ib_udata *udata, void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
enum copy_direction {
	to_mem_obj,
	from_mem_obj,
};

int rxe_mem_init_dma(struct rxe_pd *pd,
		     int access, struct rxe_mem *mem);

int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mr);

int rxe_mem_init_fast(struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);
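
/*
 * Note added for clarity (not part of the original header): the
 * copy_direction argument selects which way rxe_mem_copy()/copy_data()
 * move bytes -- to_mem_obj copies into the registered memory (e.g.
 * placing a received payload), from_mem_obj copies out of it (e.g.
 * gathering payload for an outgoing packet). A hedged, illustrative
 * receive-side call, where payload/paylen/crc are placeholders for
 * caller state:
 *
 *	err = rxe_mem_copy(mem, iova, payload, paylen, to_mem_obj, &crc);
 */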

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);

enum lookup_type {
	lookup_local,
	lookup_remote,
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type);
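
/*
 * Added note: lookup_local vs. lookup_remote presumably determines
 * whether the 32-bit key passed to lookup_mem() is interpreted as an
 * lkey (local SGE access) or an rkey (remote RDMA/atomic access), with
 * @access carrying the IB_ACCESS_* bits the caller requires.
 */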

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);

int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
		      u64 *page, int num_pages, u64 iova);

void rxe_mem_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

/* rxe_net.c */
void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);

/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd, struct ib_udata *udata);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(struct rxe_pool_entry *arg);

static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return IB_MTU_4096;
}
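
/*
 * Added note: qp_mtu() returns an enum ib_mtu value, not a byte count;
 * non-RC/UC QPs always report IB_MTU_4096. A caller that needs the size
 * in bytes can convert it, roughly:
 *
 *	int mtu = ib_mtu_enum_to_int(qp_mtu(qp));
 */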

static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}
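
/*
 * Added example: for a QP or SRQ created with max_recv_sge = 4, each
 * receive queue element occupies
 *
 *	sizeof(struct rxe_recv_wqe) + 4 * sizeof(struct ib_sge)
 *
 * bytes, i.e. the fixed WQE header plus room for the scatter list.
 */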

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}
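
/*
 * Added note: the responder resource array is treated as a circular
 * buffer sized by max_dest_rd_atomic; this helper bumps the head index
 * and wraps it back to 0 when it reaches the end of the array.
 */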

void retransmit_timer(struct timer_list *t);
void rnr_nak_timer(struct timer_list *t);

/* rxe_srq.c */
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init, struct ib_udata *udata,
		      struct rxe_create_srq_resp __user *uresp);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata);

void rxe_dealloc(struct ib_device *ib_dev);

int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb);

static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

static inline int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
				  struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	if (pkt->mask & RXE_LOOPBACK_MASK) {
		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
		rxe_loopback(skb);
		err = 0;
	} else {
		err = rxe_send(pkt, skb);
	}

	if (err) {
		rxe->xmit_errors++;
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}
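
/*
 * Added, illustrative sketch (not part of the original header) of how the
 * requester/responder paths are expected to combine the helpers above;
 * skb, av, paylen and crc are placeholders for state the caller has built:
 *
 *	skb = rxe_init_packet(rxe, av, paylen, pkt);
 *	if (!skb)
 *		return -ENOMEM;
 *	... fill in headers and payload ...
 *	err = rxe_prepare(pkt, skb, &crc);
 *	if (!err)
 *		err = rxe_xmit_packet(qp, pkt, skb);
 *
 * Note that rxe_xmit_packet() frees the skb and returns 0 when the QP is
 * not in the ready state, so callers must not retry the same skb.
 */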

#endif /* RXE_LOC_H */