linux/drivers/infiniband/hw/mlx5/mlx5_ib.h
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>

#define mlx5_ib_dbg(dev, format, arg...)                                \
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,    \
         __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)                                \
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,      \
        __LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)                               \
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,     \
        __LINE__, current->pid, ##arg)

#define field_avail(type, fld, sz) (offsetof(type, fld) +               \
                                    sizeof(((type *)0)->fld) <= (sz))
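
/*
 * Illustrative example (editorial, not part of the original header):
 * field_avail() lets a command handler accept both old and new layouts
 * of a userspace command struct.  After copying "inlen" bytes in, the
 * handler can test whether a later-added trailing field was really
 * provided by the caller:
 *
 *      if (field_avail(struct mlx5_ib_create_qp, uidx, inlen))
 *              uidx = ucmd->uidx;                      (new user library)
 *      else
 *              uidx = MLX5_IB_DEFAULT_UIDX;            (old user library)
 *
 * get_qp_user_index() and get_srq_user_index() below apply exactly this
 * pattern.
 */
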
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)

#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)

enum {
        MLX5_IB_MMAP_CMD_SHIFT  = 8,
        MLX5_IB_MMAP_CMD_MASK   = 0xff,
};
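
/*
 * Editorial note (an assumption about the mmap path, not text from the
 * original header): the shift/mask above suggest the mmap page offset
 * packs a command in its upper bits and an index in the low bits,
 * decoded roughly as:
 *
 *      cmd = (vma->vm_pgoff >> MLX5_IB_MMAP_CMD_SHIFT) &
 *            MLX5_IB_MMAP_CMD_MASK;
 *      idx = vma->vm_pgoff & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
 */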

enum mlx5_ib_mmap_cmd {
        MLX5_IB_MMAP_REGULAR_PAGE               = 0,
        MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES       = 1,
        MLX5_IB_MMAP_WC_PAGE                    = 2,
        MLX5_IB_MMAP_NC_PAGE                    = 3,
        /* 5 is chosen in order to be compatible with old versions of libmlx5 */
        MLX5_IB_MMAP_CORE_CLOCK                 = 5,
};

enum {
        MLX5_RES_SCAT_DATA32_CQE        = 0x1,
        MLX5_RES_SCAT_DATA64_CQE        = 0x2,
        MLX5_REQ_SCAT_DATA32_CQE        = 0x11,
        MLX5_REQ_SCAT_DATA64_CQE        = 0x22,
};

enum mlx5_ib_latency_class {
        MLX5_IB_LATENCY_CLASS_LOW,
        MLX5_IB_LATENCY_CLASS_MEDIUM,
        MLX5_IB_LATENCY_CLASS_HIGH,
};

enum mlx5_ib_mad_ifc_flags {
        MLX5_MAD_IFC_IGNORE_MKEY        = 1,
        MLX5_MAD_IFC_IGNORE_BKEY        = 2,
        MLX5_MAD_IFC_NET_VIEW           = 4,
};

enum {
        MLX5_CROSS_CHANNEL_BFREG         = 0,
};

enum {
        MLX5_CQE_VERSION_V0,
        MLX5_CQE_VERSION_V1,
};

struct mlx5_ib_vma_private_data {
        struct list_head list;
        struct vm_area_struct *vma;
};

struct mlx5_ib_ucontext {
        struct ib_ucontext      ibucontext;
        struct list_head        db_page_list;

        /* protect doorbell record alloc/free
         */
        struct mutex            db_page_mutex;
        struct mlx5_bfreg_info  bfregi;
        u8                      cqe_version;
        /* Transport Domain number */
        u32                     tdn;
        struct list_head        vma_private_list;

        unsigned long           upd_xlt_page;
        /* protect ODP/KSM */
        struct mutex            upd_xlt_page_mutex;
        u64                     lib_caps;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
        return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
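
/*
 * Editorial note: to_mucontext() and the to_*() helpers further down all
 * use the same container_of() idiom - given a pointer to an embedded
 * core/uverbs structure, recover the driver-private wrapper around it,
 * e.g.:
 *
 *      struct mlx5_ib_ucontext *ctx = to_mucontext(ibucontext);
 *
 * This is pure pointer arithmetic and is valid only because the member
 * is embedded in (not pointed to by) the wrapper structure.
 */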

struct mlx5_ib_pd {
        struct ib_pd            ibpd;
        u32                     pdn;
};

#define MLX5_IB_FLOW_MCAST_PRIO         (MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO          (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO     (MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT             (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS         2
struct mlx5_ib_flow_prio {
        struct mlx5_flow_table          *flow_table;
        unsigned int                    refcount;
};

struct mlx5_ib_flow_handler {
        struct list_head                list;
        struct ib_flow                  ibflow;
        struct mlx5_ib_flow_prio        *prio;
        struct mlx5_flow_handle         *rule;
};

struct mlx5_ib_flow_db {
        struct mlx5_ib_flow_prio        prios[MLX5_IB_NUM_FLOW_FT];
        struct mlx5_ib_flow_prio        sniffer[MLX5_IB_NUM_SNIFFER_FTS];
        struct mlx5_flow_table          *lag_demux_ft;
        /* Protect flow steering bypass flow tables when adding/deleting
         * flow rules.  Only a single add/removal of a flow steering rule
         * can be in progress at a time.
         */
        struct mutex                    lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */

#define MLX5_IB_SEND_UMR_ENABLE_MR             (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR            (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE          (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT            (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS       IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR     IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI      IB_QPT_RESERVED2
#define MLX5_IB_WR_UMR          IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD           16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP           BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE        BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC        BIT(2)
#define MLX5_IB_UPD_XLT_ADDR          BIT(3)
#define MLX5_IB_UPD_XLT_PD            BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS        BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */

/* Create a UD QP whose source QP number is 1 */
static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
{
        return IB_QP_CREATE_RESERVED_START;
}
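
/*
 * Illustrative sketch (editorial assumption): a caller wanting a UD QP
 * that masquerades as QP1 ORs the reserved flag into the creation
 * attributes before creating the QP:
 *
 *      init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 *
 * The helper keeps the reserved-range value from being open-coded.
 */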

struct wr_list {
        u16     opcode;
        u16     next;
};

enum mlx5_ib_rq_flags {
        MLX5_IB_RQ_CVLAN_STRIPPING      = 1 << 0,
};

struct mlx5_ib_wq {
        u64                    *wrid;
        u32                    *wr_data;
        struct wr_list         *w_list;
        unsigned               *wqe_head;
        u16                     unsig_count;

        /* serialize post to the work queue
         */
        spinlock_t              lock;
        int                     wqe_cnt;
        int                     max_post;
        int                     max_gs;
        int                     offset;
        int                     wqe_shift;
        unsigned                head;
        unsigned                tail;
        u16                     cur_post;
        u16                     last_poll;
        void                   *qend;
};

struct mlx5_ib_rwq {
        struct ib_wq            ibwq;
        struct mlx5_core_qp     core_qp;
        u32                     rq_num_pas;
        u32                     log_rq_stride;
        u32                     log_rq_size;
        u32                     rq_page_offset;
        u32                     log_page_size;
        struct ib_umem          *umem;
        size_t                  buf_size;
        unsigned int            page_shift;
        int                     create_type;
        struct mlx5_db          db;
        u32                     user_index;
        u32                     wqe_count;
        u32                     wqe_shift;
        int                     wq_sig;
};

enum {
        MLX5_QP_USER,
        MLX5_QP_KERNEL,
        MLX5_QP_EMPTY
};

enum {
        MLX5_WQ_USER,
        MLX5_WQ_KERNEL
};

struct mlx5_ib_rwq_ind_table {
        struct ib_rwq_ind_table ib_rwq_ind_tbl;
        u32                     rqtn;
};

struct mlx5_ib_ubuffer {
        struct ib_umem         *umem;
        int                     buf_size;
        u64                     buf_addr;
};

struct mlx5_ib_qp_base {
        struct mlx5_ib_qp       *container_mibqp;
        struct mlx5_core_qp     mqp;
        struct mlx5_ib_ubuffer  ubuffer;
};

struct mlx5_ib_qp_trans {
        struct mlx5_ib_qp_base  base;
        u16                     xrcdn;
        u8                      alt_port;
        u8                      atomic_rd_en;
        u8                      resp_depth;
};

struct mlx5_ib_rss_qp {
        u32     tirn;
};

struct mlx5_ib_rq {
        struct mlx5_ib_qp_base base;
        struct mlx5_ib_wq       *rq;
        struct mlx5_ib_ubuffer  ubuffer;
        struct mlx5_db          *doorbell;
        u32                     tirn;
        u8                      state;
        u32                     flags;
};

struct mlx5_ib_sq {
        struct mlx5_ib_qp_base base;
        struct mlx5_ib_wq       *sq;
        struct mlx5_ib_ubuffer  ubuffer;
        struct mlx5_db          *doorbell;
        u32                     tisn;
        u8                      state;
};

struct mlx5_ib_raw_packet_qp {
        struct mlx5_ib_sq sq;
        struct mlx5_ib_rq rq;
};

struct mlx5_bf {
        int                     buf_size;
        unsigned long           offset;
        struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_qp {
        struct ib_qp            ibqp;
        union {
                struct mlx5_ib_qp_trans trans_qp;
                struct mlx5_ib_raw_packet_qp raw_packet_qp;
                struct mlx5_ib_rss_qp rss_qp;
        };
        struct mlx5_buf         buf;

        struct mlx5_db          db;
        struct mlx5_ib_wq       rq;

        u8                      sq_signal_bits;
        u8                      next_fence;
        struct mlx5_ib_wq       sq;

        /* serialize qp state modifications
         */
        struct mutex            mutex;
        u32                     flags;
        u8                      port;
        u8                      state;
        int                     wq_sig;
        int                     scat_cqe;
        int                     max_inline_data;
        struct mlx5_bf          bf;
        int                     has_rq;

        /* only for user space QPs. For kernel
         * we have it from the bf object
         */
        int                     bfregn;

        int                     create_type;

        /* Store signature errors */
        bool                    signature_en;

        struct list_head        qps_list;
        struct list_head        cq_recv_list;
        struct list_head        cq_send_list;
        u32                     rate_limit;
};

struct mlx5_ib_cq_buf {
        struct mlx5_buf         buf;
        struct ib_umem          *umem;
        int                     cqe_size;
        int                     nent;
};

enum mlx5_ib_qp_flags {
        MLX5_IB_QP_LSO                          = IB_QP_CREATE_IPOIB_UD_LSO,
        MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK     = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
        MLX5_IB_QP_CROSS_CHANNEL            = IB_QP_CREATE_CROSS_CHANNEL,
        MLX5_IB_QP_MANAGED_SEND             = IB_QP_CREATE_MANAGED_SEND,
        MLX5_IB_QP_MANAGED_RECV             = IB_QP_CREATE_MANAGED_RECV,
        MLX5_IB_QP_SIGNATURE_HANDLING           = 1 << 5,
        /* QP uses 1 as its source QP number */
        MLX5_IB_QP_SQPN_QP1                     = 1 << 6,
        MLX5_IB_QP_CAP_SCATTER_FCS              = 1 << 7,
        MLX5_IB_QP_RSS                          = 1 << 8,
        MLX5_IB_QP_CVLAN_STRIPPING              = 1 << 9,
};

struct mlx5_umr_wr {
        struct ib_send_wr               wr;
        u64                             virt_addr;
        u64                             offset;
        struct ib_pd                   *pd;
        unsigned int                    page_shift;
        unsigned int                    xlt_size;
        u64                             length;
        int                             access_flags;
        u32                             mkey;
};

static inline struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct mlx5_umr_wr, wr);
}
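
/*
 * Illustrative sketch (editorial assumption, simplified): a UMR work
 * request is built as a struct mlx5_umr_wr whose embedded ib_send_wr
 * uses the driver-private opcode, then posted on the UMR QP:
 *
 *      struct mlx5_umr_wr umrwr = {};
 *
 *      umrwr.wr.opcode = MLX5_IB_WR_UMR;
 *      umrwr.mkey = mr->mmkey.key;
 *      err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
 *
 * In the send path, umr_wr() above recovers the full structure from the
 * generic ib_send_wr pointer.
 */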

struct mlx5_shared_mr_info {
        int mr_id;
        struct ib_umem          *umem;
};

struct mlx5_ib_cq {
        struct ib_cq            ibcq;
        struct mlx5_core_cq     mcq;
        struct mlx5_ib_cq_buf   buf;
        struct mlx5_db          db;

        /* serialize access to the CQ
         */
        spinlock_t              lock;

        /* protect resize cq
         */
        struct mutex            resize_mutex;
        struct mlx5_ib_cq_buf  *resize_buf;
        struct ib_umem         *resize_umem;
        int                     cqe_size;
        struct list_head        list_send_qp;
        struct list_head        list_recv_qp;
        u32                     create_flags;
        struct list_head        wc_list;
        enum ib_cq_notify_flags notify_flags;
        struct work_struct      notify_work;
};

struct mlx5_ib_wc {
        struct ib_wc wc;
        struct list_head list;
};

struct mlx5_ib_srq {
        struct ib_srq           ibsrq;
        struct mlx5_core_srq    msrq;
        struct mlx5_buf         buf;
        struct mlx5_db          db;
        u64                    *wrid;
        /* protect SRQ handling
         */
        spinlock_t              lock;
        int                     head;
        int                     tail;
        u16                     wqe_ctr;
        struct ib_umem         *umem;
        /* serialize arming a SRQ
         */
        struct mutex            mutex;
        int                     wq_sig;
};

struct mlx5_ib_xrcd {
        struct ib_xrcd          ibxrcd;
        u32                     xrcdn;
};

enum mlx5_ib_mtt_access_flags {
        MLX5_IB_MTT_READ  = (1 << 0),
        MLX5_IB_MTT_WRITE = (1 << 1),
};

#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

struct mlx5_ib_mr {
        struct ib_mr            ibmr;
        void                    *descs;
        dma_addr_t              desc_map;
        int                     ndescs;
        int                     max_descs;
        int                     desc_size;
        int                     access_mode;
        struct mlx5_core_mkey   mmkey;
        struct ib_umem         *umem;
        struct mlx5_shared_mr_info      *smr_info;
        struct list_head        list;
        int                     order;
        int                     umred;
        int                     npages;
        struct mlx5_ib_dev     *dev;
        u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
        struct mlx5_core_sig_ctx    *sig;
        int                     live;
        void                    *descs_alloc;
        int                     access_flags; /* Needed for rereg MR */

        struct mlx5_ib_mr      *parent;
        atomic_t                num_leaf_free;
        wait_queue_head_t       q_leaf_free;
};

struct mlx5_ib_mw {
        struct ib_mw            ibmw;
        struct mlx5_core_mkey   mmkey;
        int                     ndescs;
};

struct mlx5_ib_umr_context {
        struct ib_cqe           cqe;
        enum ib_wc_status       status;
        struct completion       done;
};

struct umr_common {
        struct ib_pd    *pd;
        struct ib_cq    *cq;
        struct ib_qp    *qp;
        /* control access to UMR QP
         */
        struct semaphore        sem;
};

enum {
        MLX5_FMR_INVALID,
        MLX5_FMR_VALID,
        MLX5_FMR_BUSY,
};

struct mlx5_cache_ent {
        struct list_head        head;
        /* sync access to the cache entry
         */
        spinlock_t              lock;

        struct dentry          *dir;
        char                    name[4];
        u32                     order;
        u32                     xlt;
        u32                     access_mode;
        u32                     page;

        u32                     size;
        u32                     cur;
        u32                     miss;
        u32                     limit;

        struct dentry          *fsize;
        struct dentry          *fcur;
        struct dentry          *fmiss;
        struct dentry          *flimit;

        struct mlx5_ib_dev     *dev;
        struct work_struct      work;
        struct delayed_work     dwork;
        int                     pending;
        struct completion       compl;
};

struct mlx5_mr_cache {
        struct workqueue_struct *wq;
        struct mlx5_cache_ent   ent[MAX_MR_CACHE_ENTRIES];
        int                     stopped;
        struct dentry           *root;
        unsigned long           last_add;
};

struct mlx5_ib_gsi_qp;

struct mlx5_ib_port_resources {
        struct mlx5_ib_resources *devr;
        struct mlx5_ib_gsi_qp *gsi;
        struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
        struct ib_cq    *c0;
        struct ib_xrcd  *x0;
        struct ib_xrcd  *x1;
        struct ib_pd    *p0;
        struct ib_srq   *s0;
        struct ib_srq   *s1;
        struct mlx5_ib_port_resources ports[2];
        /* Protects changes to the port resources */
        struct mutex    mutex;
};

struct mlx5_ib_counters {
        const char **names;
        size_t *offsets;
        u32 num_q_counters;
        u32 num_cong_counters;
        u16 set_id;
};

struct mlx5_ib_port {
        struct mlx5_ib_counters cnts;
};

struct mlx5_roce {
        /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
         * netdev pointer
         */
        rwlock_t                netdev_lock;
        struct net_device       *netdev;
        struct notifier_block   nb;
        atomic_t                next_port;
};

struct mlx5_ib_dev {
        struct ib_device                ib_dev;
        struct mlx5_core_dev            *mdev;
        struct mlx5_roce                roce;
        int                             num_ports;
        /* serialize update of capability mask
         */
        struct mutex                    cap_mask_mutex;
        bool                            ib_active;
        struct umr_common               umrc;
        /* sync used page count stats
         */
        struct mlx5_ib_resources        devr;
        struct mlx5_mr_cache            cache;
        struct timer_list               delay_timer;
        /* Prevents soft lock on massive reg MRs */
        struct mutex                    slow_path_mutex;
        int                             fill_delay;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct ib_odp_caps      odp_caps;
        u64                     odp_max_size;
        /*
         * Sleepable RCU that prevents destruction of MRs while they are still
         * being used by a page fault handler.
         */
        struct srcu_struct      mr_srcu;
        u32                     null_mkey;
#endif
        struct mlx5_ib_flow_db  flow_db;
        /* protect resources needed as part of reset flow */
        spinlock_t              reset_flow_resource_lock;
        struct list_head        qp_list;
        /* Array with num_ports elements */
        struct mlx5_ib_port     *port;
        struct mlx5_sq_bfreg     bfreg;
        struct mlx5_sq_bfreg     fp_bfreg;
        u8                              umr_fence;
};

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
        return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
        return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

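/* Editorial note: unlike the one-hop helpers above, to_mibqp() needs two
 * hops.  The mlx5_core_qp is embedded in a mlx5_ib_qp_base, which keeps
 * an explicit back-pointer (container_mibqp) to the owning mlx5_ib_qp,
 * since the base may sit inside either trans_qp or raw_packet_qp of the
 * union and a single container_of() could not tell which.
 */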
static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
        return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
        return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{
        return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
        return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
        return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
        return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
                        struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
                 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                 const void *in_mad, void *response_mad);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
                                struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
                                  struct ib_srq_init_attr *init_attr,
                                  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                          struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
                     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
                          void *buffer, u32 length,
                          struct mlx5_ib_qp_base *base);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
                                const struct ib_cq_init_attr *attr,
                                struct ib_ucontext *context,
                                struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata);
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                               struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
                       int page_shift, int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
                                             int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                          u64 length, u64 virt_addr, int access_flags,
                          struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                        const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                        const struct ib_mad_hdr *in, size_t in_mad_size,
                        struct ib_mad_hdr *out, size_t *out_mad_size,
                        u16 *out_mad_pkey_index);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
                                   struct ib_ucontext *context,
                                   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
                                          struct ib_smp *out_mad);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
                                         __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
                                 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
                                 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
                            u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
                            union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
                            struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
                        unsigned long max_page_shift,
                        int *count, int *shift,
                        int *ncont, int *order);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
                            int page_shift, size_t offset, size_t num_pages,
                            __be64 *pas, int access_flags);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
                          int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry);
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                            struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
                                struct ib_wq_init_attr *init_attr,
                                struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
                      u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
                                                      struct ib_rwq_ind_table_init_attr *init_attr,
                                                      struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
                    struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
                              unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
                           size_t nentries, struct mlx5_ib_mr *mr, int flags);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) {}

static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)        {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void)                                {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
                                         size_t nentries, struct mlx5_ib_mr *mr,
                                         int flags) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
                          u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
                              u8 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
                         u8 port, struct ifla_vf_stats *stats);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
                        u64 guid, int type);

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
                               int index);
int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
                           int index, enum ib_gid_type *gid_type);

/* GSI QP helper functions */
struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
                                    struct ib_qp_init_attr *init_attr);
int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
                          int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
                         int qp_attr_mask,
                         struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
                          struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
                          struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

static inline void init_query_mad(struct ib_smp *mad)
{
        mad->base_version  = 1;
        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method        = IB_MGMT_METHOD_GET;
}

static inline u8 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
               MLX5_PERM_LOCAL_READ;
}
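
/*
 * Worked example (editorial): convert_access() on
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ yields
 * MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_READ | MLX5_PERM_LOCAL_READ.
 * MLX5_PERM_LOCAL_READ is always set, matching the verbs rule that
 * every memory region is locally readable.
 */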

static inline int is_qp1(enum ib_qp_type qp_type)
{
        return qp_type == MLX5_IB_QPT_HW_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
        /*
         * Returns a non-zero value for unsupported CQ
         * create flags; otherwise returns zero.
         */
        return (flags & ~(IB_CQ_FLAGS_IGNORE_OVERRUN |
                          IB_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
                                     u32 *user_index)
{
        if (cqe_version) {
                if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
                    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
                        return -EINVAL;
                *user_index = cmd_uidx;
        } else {
                *user_index = MLX5_IB_DEFAULT_UIDX;
        }

        return 0;
}

static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
                                    struct mlx5_ib_create_qp *ucmd,
                                    int inlen,
                                    u32 *user_index)
{
        u8 cqe_version = ucontext->cqe_version;

        if (field_avail(struct mlx5_ib_create_qp, uidx, inlen) &&
            !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
                return 0;

        if (!!field_avail(struct mlx5_ib_create_qp, uidx, inlen) !=
            !!cqe_version)
                return -EINVAL;

        return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
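
/*
 * Editorial note on the checks above: a context that negotiated CQE
 * version 1 must pass the new command layout with a valid uidx, and a
 * version-0 context must not assign one.  The single tolerated mismatch
 * is a version-0 context using the new layout with uidx left at
 * MLX5_IB_DEFAULT_UIDX, which is treated as "no index assigned".
 * get_srq_user_index() below enforces the same contract for SRQs.
 */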

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
                                     struct mlx5_ib_create_srq *ucmd,
                                     int inlen,
                                     u32 *user_index)
{
        u8 cqe_version = ucontext->cqe_version;

        if (field_avail(struct mlx5_ib_create_srq, uidx, inlen) &&
            !cqe_version && (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
                return 0;

        if (!!field_avail(struct mlx5_ib_create_srq, uidx, inlen) !=
            !!cqe_version)
                return -EINVAL;

        return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
        return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
                                MLX5_UARS_IN_PAGE : 1;
}

static inline int get_num_uars(struct mlx5_ib_dev *dev,
                               struct mlx5_bfreg_info *bfregi)
{
        return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
}
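
/*
 * Worked example (editorial, assuming MLX5_UARS_IN_PAGE is the number of
 * 4KB UARs that fit in one system page): with 64KB system pages, the
 * uar_4k capability and library support, MLX5_UARS_IN_PAGE is 16, so a
 * context with num_sys_pages == 2 gets 2 * 16 = 32 UARs; in every other
 * case it is one UAR per system page.
 */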

#endif /* MLX5_IB_H */