linux/drivers/infiniband/sw/rxe/rxe_verbs.h
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <rdma/rdma_user_rxe.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

/* Two P_Keys match when their 15-bit key values are equal and non-zero
 * and at least one of the two has full membership (bit 15 set).
 */
static inline int pkey_match(u16 key1, u16 key2)
{
        return (((key1 & 0x7fff) != 0) &&
                ((key1 & 0x7fff) == (key2 & 0x7fff)) &&
                ((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}
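
/* Illustrative examples (not part of the original header), following the
 * IBA convention that bit 15 of a P_Key marks full membership while the
 * low 15 bits carry the key value:
 *
 *      pkey_match(0xffff, 0x7fff) -> 1  (same value, one full member)
 *      pkey_match(0x7fff, 0x7fff) -> 0  (both limited members)
 *      pkey_match(0x8000, 0x8000) -> 0  (a key value of zero never matches)
 */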

/* Return >0 if psn_a > psn_b
 *         0 if psn_a == psn_b
 *        <0 if psn_a < psn_b
 *
 * PSNs are 24 bits wide, so the difference is shifted left by 8 to move
 * bit 23 of the modular difference into the sign bit; the comparison
 * then remains correct across the 24-bit wraparound.
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
        s32 diff;

        diff = (psn_a - psn_b) << 8;
        return diff;
}
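
/* Worked examples (not part of the original header):
 *
 *      psn_compare(5, 3)        -> positive (2 << 8)
 *      psn_compare(0, 0xffffff) -> positive; PSN 0 is one step ahead of
 *                                  0xffffff after wraparound
 *      psn_compare(0xffffff, 0) -> negative
 */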

struct rxe_ucontext {
        struct ib_ucontext ibuc;
        struct rxe_pool_elem    elem;
};

struct rxe_pd {
        struct ib_pd            ibpd;
        struct rxe_pool_elem    elem;
};

struct rxe_ah {
        struct ib_ah            ibah;
        struct rxe_pool_elem    elem;
        struct rxe_av           av;
        bool                    is_user;
        int                     ah_num;
};

struct rxe_cqe {
        union {
                struct ib_wc            ibwc;
                struct ib_uverbs_wc     uibwc;
        };
};

struct rxe_cq {
        struct ib_cq            ibcq;
        struct rxe_pool_elem    elem;
        struct rxe_queue        *queue;
        spinlock_t              cq_lock;
        u8                      notify;
        bool                    is_dying;
        bool                    is_user;
        struct tasklet_struct   comp_task;
};

enum wqe_state {
        wqe_state_posted,
        wqe_state_processing,
        wqe_state_pending,
        wqe_state_done,
        wqe_state_error,
};

struct rxe_sq {
        int                     max_wr;
        int                     max_sge;
        int                     max_inline;
        spinlock_t              sq_lock; /* guard queue */
        struct rxe_queue        *queue;
};

struct rxe_rq {
        int                     max_wr;
        int                     max_sge;
        spinlock_t              producer_lock; /* guard queue producer */
        spinlock_t              consumer_lock; /* guard queue consumer */
        struct rxe_queue        *queue;
};

struct rxe_srq {
        struct ib_srq           ibsrq;
        struct rxe_pool_elem    elem;
        struct rxe_pd           *pd;
        struct rxe_rq           rq;
        u32                     srq_num;

        int                     limit;
        int                     error;
};

enum rxe_qp_state {
        QP_STATE_RESET,
        QP_STATE_INIT,
        QP_STATE_READY,
        QP_STATE_DRAIN,         /* req only */
        QP_STATE_DRAINED,       /* req only */
        QP_STATE_ERROR
};

struct rxe_req_info {
        enum rxe_qp_state       state;
        int                     wqe_index;
        u32                     psn;
        int                     opcode;
        atomic_t                rd_atomic;
        int                     wait_fence;
        int                     need_rd_atomic;
        int                     wait_psn;
        int                     need_retry;
        int                     noack_pkts;
        struct rxe_task         task;
};

struct rxe_comp_info {
        u32                     psn;
        int                     opcode;
        int                     timeout;
        int                     timeout_retry;
        int                     started_retry;
        u32                     retry_cnt;
        u32                     rnr_retry;
        struct rxe_task         task;
};

enum rdatm_res_state {
        rdatm_res_state_next,
        rdatm_res_state_new,
        rdatm_res_state_replay,
};

struct resp_res {
        int                     type;
        int                     replay;
        u32                     first_psn;
        u32                     last_psn;
        u32                     cur_psn;
        enum rdatm_res_state    state;

        union {
                struct {
                        struct sk_buff  *skb;
                } atomic;
                struct {
                        struct rxe_mr   *mr;
                        u64             va_org;
                        u32             rkey;
                        u32             length;
                        u64             va;
                        u32             resid;
                } read;
        };
};

struct rxe_resp_info {
        enum rxe_qp_state       state;
        u32                     msn;
        u32                     psn;
        u32                     ack_psn;
        int                     opcode;
        int                     drop_msg;
        int                     goto_error;
        int                     sent_psn_nak;
        enum ib_wc_status       status;
        u8                      aeth_syndrome;

        /* Receive only */
        struct rxe_recv_wqe     *wqe;

        /* RDMA read / atomic only */
        u64                     va;
        u64                     offset;
        struct rxe_mr           *mr;
        u32                     resid;
        u32                     rkey;
        u32                     length;
        u64                     atomic_orig;

        /* SRQ only */
        struct {
                struct rxe_recv_wqe     wqe;
                struct ib_sge           sge[RXE_MAX_SGE];
        } srq_wqe;

        /* Responder resources. It's a circular list where the oldest
         * resource is dropped first.
         */
        struct resp_res         *resources;
        unsigned int            res_head;
        unsigned int            res_tail;
        struct resp_res         *res;
        struct rxe_task         task;
};
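
/* A minimal sketch (an assumption, not the driver's actual code) of how
 * the responder could claim the next slot in the circular resources
 * array, overwriting the oldest entry once the array is full; the depth
 * is assumed to come from the QP's max_dest_rd_atomic attribute:
 *
 *      static struct resp_res *res_next(struct rxe_resp_info *resp,
 *                                       unsigned int depth)
 *      {
 *              struct resp_res *res = &resp->resources[resp->res_head];
 *
 *              if (++resp->res_head == depth)
 *                      resp->res_head = 0;     wrap around
 *              return res;
 *      }
 */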

struct rxe_qp {
        struct ib_qp            ibqp;
        struct rxe_pool_elem    elem;
        struct ib_qp_attr       attr;
        unsigned int            valid;
        unsigned int            mtu;
        bool                    is_user;

        struct rxe_pd           *pd;
        struct rxe_srq          *srq;
        struct rxe_cq           *scq;
        struct rxe_cq           *rcq;

        enum ib_sig_type        sq_sig_type;

        struct rxe_sq           sq;
        struct rxe_rq           rq;

        struct socket           *sk;
        u32                     dst_cookie;
        u16                     src_port;

        struct rxe_av           pri_av;
        struct rxe_av           alt_av;

        /* list of mcast groups qp has joined (for cleanup) */
        struct list_head        grp_list;
        spinlock_t              grp_lock; /* guard grp_list */

        struct sk_buff_head     req_pkts;
        struct sk_buff_head     resp_pkts;

        struct rxe_req_info     req;
        struct rxe_comp_info    comp;
        struct rxe_resp_info    resp;

        atomic_t                ssn;
        atomic_t                skb_out;
        int                     need_req_skb;

        /* Timer for retransmitting packets when ACKs have been lost. RC
         * only. The requester sets it when it is not already
         * started. The completer resets it whenever an ACK is
         * received.
         */
        struct timer_list retrans_timer;
        u64 qp_timeout_jiffies;

        /* Timer for handling RNR NAKs. */
        struct timer_list rnr_nak_timer;

        spinlock_t              state_lock; /* guard requester and completer */

        struct execute_work     cleanup_work;
};
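
/* A minimal sketch (an assumption, not the driver's actual code) of how
 * the requester side might arm the retransmit timer described above,
 * assuming qp_timeout_jiffies was derived from the IB timeout attribute
 * when the QP was modified:
 *
 *      if (qp->ibqp.qp_type == IB_QPT_RC && qp->qp_timeout_jiffies)
 *              mod_timer(&qp->retrans_timer,
 *                        jiffies + qp->qp_timeout_jiffies);
 */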

enum rxe_mr_state {
        RXE_MR_STATE_INVALID,
        RXE_MR_STATE_FREE,
        RXE_MR_STATE_VALID,
};

enum rxe_mr_copy_dir {
        RXE_TO_MR_OBJ,
        RXE_FROM_MR_OBJ,
};

enum rxe_mr_lookup_type {
        RXE_LOOKUP_LOCAL,
        RXE_LOOKUP_REMOTE,
};

#define RXE_BUF_PER_MAP         (PAGE_SIZE / sizeof(struct rxe_phys_buf))

struct rxe_phys_buf {
        u64      addr;
        u64      size;
};

struct rxe_map {
        struct rxe_phys_buf     buf[RXE_BUF_PER_MAP];
};

struct rxe_map_set {
        struct rxe_map          **map;
        u64                     va;
        u64                     iova;
        size_t                  length;
        u32                     offset;
        u32                     nbuf;
        int                     page_shift;
        int                     page_mask;
};
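
/* Illustrative arithmetic (assuming 4 KiB pages and the 16-byte
 * rxe_phys_buf above): RXE_BUF_PER_MAP is 4096 / 16 = 256, so a flat
 * buffer index n splits into a map index and an offset within that map:
 *
 *      map_idx = n >> 8;                    which rxe_map page
 *      buf_idx = n & (RXE_BUF_PER_MAP - 1); which rxe_phys_buf within it
 *
 * The map_shift/map_mask fields in struct rxe_mr below appear to cache
 * exactly this shift and mask.
 */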

/* The upper 24 bits of an rkey index the object it names; memory
 * windows are allocated from an index range disjoint from the one used
 * for memory regions, so the index alone tells the two apart.
 */
static inline int rkey_is_mw(u32 rkey)
{
        u32 index = rkey >> 8;

        return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
}
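
/* Illustrative example (not part of the original header): the low 8 bits
 * of an rkey are a variable key byte, so an rkey built as
 *
 *      u32 rkey = (index << 8) | key_byte;
 *
 * names a memory window exactly when index falls within
 * RXE_MIN_MW_INDEX..RXE_MAX_MW_INDEX (defined in rxe_param.h).
 */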

struct rxe_mr {
        struct rxe_pool_elem    elem;
        struct ib_mr            ibmr;

        struct ib_umem          *umem;

        u32                     lkey;
        u32                     rkey;
        enum rxe_mr_state       state;
        enum ib_mr_type         type;
        int                     access;

        int                     map_shift;
        int                     map_mask;

        u32                     num_buf;

        u32                     max_buf;
        u32                     num_map;

        atomic_t                num_mw;

        struct rxe_map_set      *cur_map_set;
        struct rxe_map_set      *next_map_set;
};

enum rxe_mw_state {
        RXE_MW_STATE_INVALID    = RXE_MR_STATE_INVALID,
        RXE_MW_STATE_FREE       = RXE_MR_STATE_FREE,
        RXE_MW_STATE_VALID      = RXE_MR_STATE_VALID,
};

struct rxe_mw {
        struct ib_mw            ibmw;
        struct rxe_pool_elem    elem;
        spinlock_t              lock;
        enum rxe_mw_state       state;
        struct rxe_qp           *qp; /* Type 2 only */
        struct rxe_mr           *mr;
        u32                     rkey;
        int                     access;
        u64                     addr;
        u64                     length;
};

struct rxe_mc_grp {
        struct rxe_pool_elem    elem;
        spinlock_t              mcg_lock; /* guard group */
        struct rxe_dev          *rxe;
        struct list_head        qp_list;
        union ib_gid            mgid;
        int                     num_qp;
        u32                     qkey;
        u16                     pkey;
};

struct rxe_mc_elem {
        struct rxe_pool_elem    elem;
        struct list_head        qp_list;
        struct list_head        grp_list;
        struct rxe_qp           *qp;
        struct rxe_mc_grp       *grp;
};

struct rxe_port {
        struct ib_port_attr     attr;
        __be64                  port_guid;
        __be64                  subnet_prefix;
        spinlock_t              port_lock; /* guard port */
        unsigned int            mtu_cap;
        /* special QPs */
        u32                     qp_smi_index;
        u32                     qp_gsi_index;
};

struct rxe_dev {
        struct ib_device        ib_dev;
        struct ib_device_attr   attr;
        int                     max_ucontext;
        int                     max_inline_data;
        struct mutex    usdev_lock;

        struct net_device       *ndev;

        struct rxe_pool         uc_pool;
        struct rxe_pool         pd_pool;
        struct rxe_pool         ah_pool;
        struct rxe_pool         srq_pool;
        struct rxe_pool         qp_pool;
        struct rxe_pool         cq_pool;
        struct rxe_pool         mr_pool;
        struct rxe_pool         mw_pool;
        struct rxe_pool         mc_grp_pool;
        struct rxe_pool         mc_elem_pool;

        spinlock_t              pending_lock; /* guard pending_mmaps */
        struct list_head        pending_mmaps;

        spinlock_t              mmap_offset_lock; /* guard mmap_offset */
        u64                     mmap_offset;

        atomic64_t              stats_counters[RXE_NUM_OF_COUNTERS];

        struct rxe_port         port;
        struct crypto_shash     *tfm;
};

static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
        atomic64_inc(&rxe->stats_counters[index]);
}
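
/* Typical usage (illustrative; RXE_CNT_RCVD_PKTS is one of the counters
 * enumerated in rxe_hw_counters.h):
 *
 *      rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);
 */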

static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
        return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
        return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
        return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
        return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
        return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
        return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
        return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
        return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}

static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
        return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}
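
/* The converters above recover the rxe structure that embeds a core
 * uverbs object via container_of(), so a verbs callback handed an
 * ib_qp, for example, can reach the driver state:
 *
 *      struct rxe_qp *qp = to_rqp(ibqp);
 *      struct rxe_dev *rxe = to_rdev(ibqp->device);
 */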

static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
{
        return to_rpd(ah->ibah.pd);
}

static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
        return to_rpd(mr->ibmr.pd);
}

static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
{
        return to_rpd(mw->ibmw.pd);
}

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

void rxe_mc_cleanup(struct rxe_pool_elem *elem);

#endif /* RXE_VERBS_H */