/* linux/include/linux/mlx5/driver.h */
   1/*
   2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#ifndef MLX5_DRIVER_H
  34#define MLX5_DRIVER_H
  35
  36#include <linux/kernel.h>
  37#include <linux/completion.h>
  38#include <linux/pci.h>
  39#include <linux/spinlock_types.h>
  40#include <linux/semaphore.h>
  41#include <linux/slab.h>
  42#include <linux/vmalloc.h>
  43#include <linux/radix-tree.h>
  44
  45#include <linux/mlx5/device.h>
  46#include <linux/mlx5/doorbell.h>
  47
/* Lengths of fixed-size identity strings kept in mlx5_core_dev. */
enum {
	MLX5_BOARD_ID_LEN = 64,
	MLX5_MAX_NAME_LEN = 16,
};

enum {
	/* one minute for the sake of bringup. Generally, commands must always
	 * complete and we may need to increase this timeout value
	 */
	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME	= 32,	/* command workqueue name buffer size */
};

/* Command-entry ownership markers and the success status code. */
enum {
	CMD_OWNER_SW		= 0x0,
	CMD_OWNER_HW		= 0x1,
	CMD_STATUS_SUCCESS	= 0,
};
  66
/* Special QP numbers/types. */
enum mlx5_sqp_t {
	MLX5_SQP_SMI		= 0,
	MLX5_SQP_GSI		= 1,
	MLX5_SQP_IEEE_1588	= 2,
	MLX5_SQP_SNIFFER	= 3,
	MLX5_SQP_SYNC_UMR	= 4,
};

enum {
	MLX5_MAX_PORTS	= 2,	/* max physical ports per device */
};

/* Fixed EQ vector assignments; completion EQs start at COMP_BASE. */
enum {
	MLX5_EQ_VEC_PAGES	 = 0,
	MLX5_EQ_VEC_CMD		 = 1,
	MLX5_EQ_VEC_ASYNC	 = 2,
	MLX5_EQ_VEC_COMP_BASE,
};

enum {
	MLX5_MAX_IRQ_NAME	= 32	/* irq name buffer size, incl. NUL */
};

/* Atomic operation modes; values are encoded above bit 16. */
enum {
	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
	MLX5_ATOMIC_MODE_CX		= 2 << 16,
	MLX5_ATOMIC_MODE_8B		= 3 << 16,
	MLX5_ATOMIC_MODE_16B		= 4 << 16,
	MLX5_ATOMIC_MODE_32B		= 5 << 16,
	MLX5_ATOMIC_MODE_64B		= 6 << 16,
	MLX5_ATOMIC_MODE_128B		= 7 << 16,
	MLX5_ATOMIC_MODE_256B		= 8 << 16,
};
 100
/* Access-register IDs, passed as reg_num to mlx5_core_access_reg(). */
enum {
	MLX5_REG_QETCR		 = 0x4005,
	MLX5_REG_QTCT		 = 0x400a,
	MLX5_REG_PCAP		 = 0x5001,
	MLX5_REG_PMTU		 = 0x5003,
	MLX5_REG_PTYS		 = 0x5004,
	MLX5_REG_PAOS		 = 0x5006,
	MLX5_REG_PFCC		 = 0x5007,
	MLX5_REG_PPCNT		 = 0x5008,
	MLX5_REG_PMAOS		 = 0x5012,
	MLX5_REG_PUDE		 = 0x5009,
	MLX5_REG_PMPE		 = 0x5010,
	MLX5_REG_PELC		 = 0x500e,
	MLX5_REG_PVLC		 = 0x500f,
	MLX5_REG_PMLP		 = 0, /* TBD */
	MLX5_REG_NODE_DESC	 = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
};

/* Atomic operation capability bits. */
enum {
	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
};

/* Flag bits for resuming after a page fault (ODP). */
enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
};
 131
/* Resource kinds that can be exposed through debugfs. */
enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

/* One debugfs file describing a single field of a resource. */
struct mlx5_field_desc {
	struct dentry	       *dent;
	int			i;	/* NOTE(review): looks like a field index — confirm in debugfs code */
};
 142
 143struct mlx5_rsc_debug {
 144        struct mlx5_core_dev   *dev;
 145        void                   *object;
 146        enum dbg_rsc_type       type;
 147        struct dentry          *root;
 148        struct mlx5_field_desc  fields[0];
 149};
 150
/* Events delivered to interface drivers via mlx5_interface->event(). */
enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR,
	MLX5_DEV_EVENT_PORT_UP,
	MLX5_DEV_EVENT_PORT_DOWN,
	MLX5_DEV_EVENT_PORT_INITIALIZED,
	MLX5_DEV_EVENT_LID_CHANGE,
	MLX5_DEV_EVENT_PKEY_CHANGE,
	MLX5_DEV_EVENT_GUID_CHANGE,
	MLX5_DEV_EVENT_CLIENT_REREG,
};

/* Administrative port up/down values. */
enum mlx5_port_status {
	MLX5_PORT_UP	    = 1,
	MLX5_PORT_DOWN	    = 2,
};
 166
/* Bookkeeping for the UAR/UUAR (doorbell page) allocator. */
struct mlx5_uuar_info {
	struct mlx5_uar	       *uars;
	int			num_uars;
	int			num_low_latency_uuars;
	unsigned long	       *bitmap;	/* allocation bitmap */
	unsigned int	       *count;	/* per-uuar user counts */
	struct mlx5_bf	       *bfs;

	/*
	 * protect uuar allocation data structs
	 */
	struct mutex		lock;
	u32			ver;
};

/* One blue-flame register slice within a UAR page. */
struct mlx5_bf {
	void __iomem	       *reg;
	void __iomem	       *regreg;
	int			buf_size;
	struct mlx5_uar	       *uar;	/* owning UAR */
	unsigned long		offset;
	int			need_lock;	/* nonzero: take @lock around buffer selection */
	/* protect blue flame buffer selection when needed
	 */
	spinlock_t		lock;

	/* serialize 64 bit writes when done as two 32 bit accesses
	 */
	spinlock_t		lock32;
	int			uuarn;	/* uuar index this bf belongs to */
};
 198
/* First 16 bytes of a command, kept inline in the message. */
struct mlx5_cmd_first {
	__be32		data[4];
};

/* A command message: the inline first block plus a chain of mailboxes. */
struct mlx5_cmd_msg {
	struct list_head		list;
	struct cache_ent	       *cache;	/* cache this msg was taken from, if any */
	u32				len;
	struct mlx5_cmd_first		first;
	struct mlx5_cmd_mailbox	       *next;	/* rest of the message payload */
};

/* debugfs knobs for injecting and inspecting commands. */
struct mlx5_cmd_debug {
	struct dentry	       *dbg_root;
	struct dentry	       *dbg_in;
	struct dentry	       *dbg_out;
	struct dentry	       *dbg_outlen;
	struct dentry	       *dbg_status;
	struct dentry	       *dbg_run;
	void		       *in_msg;
	void		       *out_msg;
	u8			status;
	u16			inlen;
	u16			outlen;
};

/* One size-class of cached command messages. */
struct cache_ent {
	/* protect block chain allocations
	 */
	spinlock_t		lock;
	struct list_head	head;
};

/* Message cache with two size classes. */
struct cmd_msg_cache {
	struct cache_ent	large;
	struct cache_ent	med;

};

/* Per-opcode command execution statistics (exposed via debugfs). */
struct mlx5_cmd_stats {
	u64		sum;	/* accumulated execution time */
	u64		n;	/* number of executions */
	struct dentry  *root;
	struct dentry  *avg;
	struct dentry  *count;
	/* protect command average calculations */
	spinlock_t	lock;
};
 247
/* State of the FW command interface: the DMA-able command queue, its
 * allocator locks, the completion workqueue and per-opcode statistics.
 */
struct mlx5_cmd {
	void	       *cmd_alloc_buf;
	dma_addr_t	alloc_dma;
	int		alloc_size;
	void	       *cmd_buf;
	dma_addr_t	dma;
	u16		cmdif_rev;	/* command interface revision */
	u8		log_sz;		/* log2 of queue depth */
	u8		log_stride;
	int		max_reg_cmds;
	int		events;
	u32 __iomem    *vector;

	/* protect command queue allocations
	 */
	spinlock_t	alloc_lock;

	/* protect token allocations
	 */
	spinlock_t	token_lock;
	u8		token;
	unsigned long	bitmask;	/* free command-queue entries */
	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;	/* separate slot for page commands */
	int	mode;			/* events vs. polling; see mlx5_cmd_use_events/_polling */
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct pci_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache;
	int checksum_disabled;
	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
 282
/* Per-port IB capabilities cached on the device. */
struct mlx5_port_caps {
	int	gid_table_len;
	int	pkey_table_len;
	u8	ext_port_cap;
};

/* One DMA-able mailbox block in a command message chain. */
struct mlx5_cmd_mailbox {
	void	       *buf;
	dma_addr_t	dma;
	struct mlx5_cmd_mailbox *next;
};

/* A virtually contiguous buffer and its DMA mapping. */
struct mlx5_buf_list {
	void		       *buf;
	dma_addr_t		map;
};

struct mlx5_buf {
	struct mlx5_buf_list	direct;
	int			npages;
	int			size;		/* total size in bytes */
	u8			page_shift;
};
 306
/* An event queue and its backing buffer. */
struct mlx5_eq {
	struct mlx5_core_dev   *dev;
	__be32 __iomem	       *doorbell;
	u32			cons_index;	/* consumer index */
	struct mlx5_buf		buf;
	int			size;
	unsigned int		irqn;
	u8			eqn;		/* HW EQ number */
	int			nent;		/* number of entries */
	u64			mask;
	struct list_head	list;
	int			index;
	struct mlx5_rsc_debug	*dbg;
};

/* A protection/signature validation context (PSV). */
struct mlx5_core_psv {
	u32	psv_idx;
	struct psv_layout {
		u32	pd;
		u16	syndrome;
		u16	reserved;
		u16	bg;
		u16	app_tag;
		u32	ref_tag;
	} psv;
};
 333
/* Signature offload context: memory-domain and wire-domain PSVs plus the
 * last reported signature error.
 */
struct mlx5_core_sig_ctx {
	struct mlx5_core_psv	psv_memory;
	struct mlx5_core_psv	psv_wire;
	struct ib_sig_err	err_item;
	bool			sig_status_checked;
	bool			sig_err_exists;
	u32			sigerr_count;
};

/* Core view of a memory key. */
struct mlx5_core_mkey {
	u64			iova;
	u64			size;
	u32			key;
	u32			pd;
};

/* Resource kinds tracked via mlx5_core_rsc_common; QP/RQ/SQ reuse the
 * HW event-queue-type encoding.
 */
enum mlx5_res_type {
	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ	= 3,
	MLX5_RES_XSRQ	= 4,
};
 357
/* Common refcounted header shared by tracked resources. */
struct mlx5_core_rsc_common {
	enum mlx5_res_type	res;
	atomic_t		refcount;
	struct completion	free;
};

struct mlx5_core_srq {
	struct mlx5_core_rsc_common	common; /* must be first */
	u32		srqn;
	int		max;
	int		max_gs;
	int		max_avail_gather;
	int		wqe_shift;
	void (*event)	(struct mlx5_core_srq *, enum mlx5_event);

	atomic_t		refcount;
	struct completion	free;
};
 376
/* All EQs of a device plus the consumer-index update mappings. */
struct mlx5_eq_table {
	void __iomem	       *update_ci;
	void __iomem	       *update_arm_ci;
	struct list_head	comp_eqs_list;	/* completion EQs */
	struct mlx5_eq		pages_eq;
	struct mlx5_eq		async_eq;
	struct mlx5_eq		cmd_eq;
	int			num_comp_vectors;
	/* protect EQs list
	 */
	spinlock_t		lock;
};

/* A mapped user-access region (doorbell page). */
struct mlx5_uar {
	u32			index;
	struct list_head	bf_list;
	unsigned		free_bf_bmap;
	void __iomem	       *bf_map;
	void __iomem	       *map;
};
 397
 398
/* Periodic health polling state; @work is queued on @wq when the poll
 * detects a problem.
 */
struct mlx5_core_health {
	struct health_buffer __iomem   *health;
	__be32 __iomem		       *health_counter;
	struct timer_list		timer;
	u32				prev;		/* last observed counter value */
	int				miss_counter;
	bool				sick;
	struct workqueue_struct	       *wq;
	struct work_struct		work;
};

/* cqn -> CQ lookup. */
struct mlx5_cq_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

/* qpn -> QP lookup. */
struct mlx5_qp_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

/* srqn -> SRQ lookup. */
struct mlx5_srq_table {
	/* protect radix tree
	 */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

/* mkey lookup; guarded by an rwlock rather than a spinlock. */
struct mlx5_mkey_table {
	/* protect radix tree
	 */
	rwlock_t		lock;
	struct radix_tree_root	tree;
};
 437
/* Per-VF state kept by the PF. */
struct mlx5_vf_context {
	int	enabled;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context	*vfs_ctx;	/* array of num_vfs entries */
	int			num_vfs;
	int			enabled_vfs;
};

/* Name and CPU affinity mask for one interrupt vector. */
struct mlx5_irq_info {
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
};

struct mlx5_eswitch;	/* opaque; defined by the eswitch code */
 454
/* Software-private state of a core device: interrupt vectors, resource
 * tables, FW page bookkeeping, debugfs roots and flow-steering namespaces.
 */
struct mlx5_priv {
	char			name[MLX5_MAX_NAME_LEN];
	struct mlx5_eq_table	eq_table;
	struct msix_entry	*msix_arr;
	struct mlx5_irq_info	*irq_info;	/* one per MSI-X vector */
	struct mlx5_uuar_info	uuari;
	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

	/* pages stuff */
	struct workqueue_struct *pg_wq;
	struct rb_root		page_root;
	int			fw_pages;
	atomic_t		reg_pages;
	struct list_head	free_list;
	int			vfs_pages;

	struct mlx5_core_health health;

	struct mlx5_srq_table	srq_table;

	/* start: qp stuff */
	struct mlx5_qp_table	qp_table;
	struct dentry	       *qp_debugfs;
	struct dentry	       *eq_debugfs;
	struct dentry	       *cq_debugfs;
	struct dentry	       *cmdif_debugfs;
	/* end: qp stuff */

	/* start: cq stuff */
	struct mlx5_cq_table	cq_table;
	/* end: cq stuff */

	/* start: mkey stuff */
	struct mlx5_mkey_table	mkey_table;
	/* end: mkey stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex		alloc_mutex;
	int			numa_node;

	struct mutex		pgdir_mutex;	/* protect pgdir_list */
	struct list_head	pgdir_list;
	/* end: alloc stuff */
	struct dentry	       *dbg_root;

	/* protect mkey key part */
	spinlock_t		mkey_lock;
	u8			mkey_key;

	struct list_head	dev_list;
	struct list_head	ctx_list;
	spinlock_t		ctx_lock;

	struct mlx5_eswitch	*eswitch;
	struct mlx5_core_sriov	sriov;
	unsigned long		pci_dev_data;	/* MLX5_PCI_DEV_* flags */
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_root_namespace *fdb_root_ns;
};
 515
/* Overall device health state. */
enum mlx5_device_state {
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

/* Bits kept in mlx5_core_dev->intf_state. */
enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_DOWN = BIT(0),
	MLX5_INTERFACE_STATE_UP = BIT(1),
	MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
};

/* PCI enable state, guarded by mlx5_core_dev->pci_status_mutex. */
enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};
 531
/* The core device: one instance per PCI function. */
struct mlx5_core_dev {
	struct pci_dev	       *pdev;
	/* sync pci state */
	struct mutex		pci_status_mutex;
	enum mlx5_pci_status	pci_status;
	u8			rev_id;
	char			board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd		cmd;
	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
	/* current and maximal HCA capabilities, per capability type */
	u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
	phys_addr_t		iseg_base;
	struct mlx5_init_seg __iomem *iseg;	/* mapped initialization segment */
	enum mlx5_device_state	state;
	/* sync interface state */
	struct mutex		intf_state_mutex;
	unsigned long		intf_state;	/* MLX5_INTERFACE_STATE_* bits */
	void			(*event) (struct mlx5_core_dev *dev,
					  enum mlx5_dev_event event,
					  unsigned long param);
	struct mlx5_priv	priv;
	struct mlx5_profile	*profile;
	atomic_t		num_qps;
	u32			issi;	/* ISSI negotiated with FW — confirm in init code */
};
 557
/* A doorbell record and where it came from (kernel pgdir or user page). */
struct mlx5_db {
	__be32			*db;
	union {
		struct mlx5_db_pgdir		*pgdir;
		struct mlx5_ib_user_db_page	*user_page;
	}			u;
	dma_addr_t		dma;
	int			index;	/* slot within the page */
};

enum {
	/* doorbell records per page: one per cache line */
	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,	/* entries per completion EQ */
};

/* Protocol selector bits for the PTYS register. */
enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

/* One page of doorbell records plus its allocation bitmap. */
struct mlx5_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
	__be32		       *db_page;
	dma_addr_t		db_dma;
};
 587
/* Completion callback type for asynchronously executed commands. */
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

/* Tracks one in-flight command: its messages, completion state and
 * timestamps.
 */
struct mlx5_cmd_work_ent {
	struct mlx5_cmd_msg    *in;
	struct mlx5_cmd_msg    *out;
	void		       *uout;		/* caller's output buffer */
	int			uout_size;
	mlx5_cmd_cbk_t		callback;
	void		       *context;	/* passed to @callback */
	int			idx;		/* slot in the command queue */
	struct completion	done;
	struct mlx5_cmd        *cmd;
	struct work_struct	work;
	struct mlx5_cmd_layout *lay;
	int			ret;
	int			page_queue;
	u8			status;
	u8			token;
	u64			ts1;		/* timestamps taken around execution */
	u64			ts2;
	u16			op;		/* command opcode */
};

/* Physical address plus log page size, as passed to FW. */
struct mlx5_pas {
	u64	pa;
	u8	log_sz;
};
 615
/* Requested administrative state policy for a vport. */
enum port_state_policy {
	MLX5_POLICY_DOWN	= 0,
	MLX5_POLICY_UP		= 1,
	MLX5_POLICY_FOLLOW	= 2,
	MLX5_POLICY_INVALID	= 0xffffffff
};

enum phy_port_state {
	MLX5_AAA_111	/* placeholder value */
};

/* HCA vport context as exchanged with FW; @field_select marks which
 * fields a modify operation applies.
 */
struct mlx5_hca_vport_context {
	u32			field_select;
	bool			sm_virt_aware;
	bool			has_smi;
	bool			has_raw;
	enum port_state_policy	policy;
	enum phy_port_state	phys_state;
	enum ib_port_state	vport_state;
	u8			port_physical_state;
	u64			sys_image_guid;
	u64			port_guid;
	u64			node_guid;
	u32			cap_mask1;
	u32			cap_mask1_perm;
	u32			cap_mask2;
	u32			cap_mask2_perm;
	u16			lid;
	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u8			lmc;
	u8			subnet_timeout;
	u16			sm_lid;
	u8			sm_sl;
	u16			qkey_violation_counter;
	u16			pkey_violation_counter;
	bool			grh_required;
};
 653
 654static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
 655{
 656                return buf->direct.buf + offset;
 657}
 658
 659extern struct workqueue_struct *mlx5_core_wq;
 660
 661#define STRUCT_FIELD(header, field) \
 662        .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
 663        .struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
 664
/* Recover the mlx5_core_dev stored in the PCI device's drvdata. */
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;	/* top-level debugfs directory */
 671
 672static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
 673{
 674        return ioread32be(&dev->iseg->fw_rev) & 0xffff;
 675}
 676
 677static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
 678{
 679        return ioread32be(&dev->iseg->fw_rev) >> 16;
 680}
 681
 682static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
 683{
 684        return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
 685}
 686
 687static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
 688{
 689        return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
 690}
 691
 692static inline void *mlx5_vzalloc(unsigned long size)
 693{
 694        void *rtn;
 695
 696        rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
 697        if (!rtn)
 698                rtn = vzalloc(size);
 699        return rtn;
 700}
 701
 702static inline u32 mlx5_base_mkey(const u32 key)
 703{
 704        return key & 0xffffff00u;
 705}
 706
/* Command interface setup, teardown and execution. */
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
int mlx5_cmd_status_to_err_v2(void *ptr);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
/* Execute a command synchronously (exec) or with a callback (exec_cb). */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context);
/* UAR / UUAR allocation and mapping. */
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
		       bool map_wc);
void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
/* Health monitoring lifecycle. */
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
/* DMA buffer and command-mailbox helpers. */
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
/* SRQ management. */
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_create_srq_mbox_in *in, int inlen,
			 int is_xrc);
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_query_srq_mbox_out *out);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq);
/* Memory key management. */
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  struct mlx5_create_mkey_mbox_in *in, int inlen,
			  mlx5_cmd_cbk_t callback, void *context,
			  struct mlx5_create_mkey_mbox_out *out);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 struct mlx5_query_mkey_mbox_out *out, int outlen);
int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
			     u32 *mkey);
/* Protection domains and MAD passthrough. */
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		      u16 opmod, u8 port);
/* FW page allocator and SR-IOV lifecycle. */
void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
/* debugfs registration. */
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
/* EQ infrastructure and event dispatch. */
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
/* Multicast group attach/detach. */
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
/* Generic access-register read/write; see MLX5_REG_* for reg_num. */
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       struct mlx5_query_eq_mbox_out *out, int outlen);
int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
/* Doorbell record allocation. */
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
/* PSV and signature error handling. */
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);
 827
 828static inline int fw_initializing(struct mlx5_core_dev *dev)
 829{
 830        return ioread32be(&dev->iseg->initializing) >> 31;
 831}
 832
/* An mkey is split into a 24-bit index (high bits) and an 8-bit variant
 * (low bits); the helpers below convert between the representations.
 */
static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}
 847
/* Bits of mlx5_profile->mask selecting which profile fields apply. */
enum {
	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
};

enum {
	MAX_MR_CACHE_ENTRIES	= 16,	/* size of mlx5_profile->mr_cache */
};

/* Protocols an mlx5_interface driver may register for. */
enum {
	MLX5_INTERFACE_PROTOCOL_IB  = 0,
	MLX5_INTERFACE_PROTOCOL_ETH = 1,
};
 861
/* Callbacks an upper-layer driver (IB/ETH) registers with the core. */
struct mlx5_interface {
	void *			(*add)(struct mlx5_core_dev *dev);
	void			(*remove)(struct mlx5_core_dev *dev, void *context);
	void			(*event)(struct mlx5_core_dev *dev, void *context,
					 enum mlx5_dev_event event, unsigned long param);
	void *			(*get_dev)(void *context);
	int			protocol;	/* MLX5_INTERFACE_PROTOCOL_* */
	struct list_head	list;
};
 871
/* Interface registration and lookup. */
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
 876
/* Tunable device profile; @mask (MLX5_PROF_MASK_*) selects which fields
 * are in effect.
 */
struct mlx5_profile {
	u64	mask;
	u8	log_max_qp;
	struct {
		int	size;
		int	limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};
 885
/* Flags stored in mlx5_priv->pci_dev_data. */
enum {
	MLX5_PCI_DEV_IS_VF		= 1 << 0,
};
 889
 890static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
 891{
 892        return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
 893}
 894
 895static inline int mlx5_get_gid_table_len(u16 param)
 896{
 897        if (param > 4) {
 898                pr_warn("gid table length is zero\n");
 899                return 0;
 900        }
 901
 902        return 8 * (1 << param);
 903}
 904
/* Marker bit for command completions triggered by software. */
enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
 908
 909#endif /* MLX5_DRIVER_H */
 910