linux/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __PVRDMA_H__
#define __PVRDMA_H__

#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <rdma/vmw_pvrdma-abi.h>

#include "pvrdma_ring.h"
#include "pvrdma_dev_api.h"
#include "pvrdma_verbs.h"

/* NOT the same as BIT_MASK(). */
#define PVRDMA_MASK(n) (((n) << 1) - 1)
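/*
 * Assuming the PVRDMA_*_MAX constants used with this macro name the
 * highest flag bit the device supports, PVRDMA_MASK() produces an
 * inclusive mask of every bit up to and including that bit, e.g.
 * PVRDMA_MASK(0x8) == 0xf.
 */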

/*
 * VMware PVRDMA PCI device id.
 */
#define PCI_DEVICE_ID_VMWARE_PVRDMA     0x0820

#define PVRDMA_NUM_RING_PAGES           4
#define PVRDMA_QP_NUM_HEADER_PAGES      1

struct pvrdma_dev;

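/*
 * Two-level page directory (a sketch of the layout set up by
 * pvrdma_page_dir_init() below): @dir is a single page of DMA
 * addresses pointing at @ntables page tables, each of which is a page
 * of DMA addresses of the @npages data pages; @pages caches the kernel
 * virtual addresses of those data pages, when they are allocated.
 */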
struct pvrdma_page_dir {
        dma_addr_t dir_dma;
        u64 *dir;
        int ntables;
        u64 **tables;
        u64 npages;
        void **pages;
};

struct pvrdma_cq {
        struct ib_cq ibcq;
        int offset;
        spinlock_t cq_lock; /* Poll lock. */
        struct pvrdma_uar_map *uar;
        struct ib_umem *umem;
        struct pvrdma_ring_state *ring_state;
        struct pvrdma_page_dir pdir;
        u32 cq_handle;
        bool is_kernel;
        refcount_t refcnt;
        struct completion free;
};

struct pvrdma_id_table {
        u32 last;
        u32 top;
        u32 max;
        u32 mask;
        spinlock_t lock; /* Table lock. */
        unsigned long *table;
};

struct pvrdma_uar_map {
        unsigned long pfn;
        void __iomem *map;
        int index;
};

struct pvrdma_uar_table {
        struct pvrdma_id_table tbl;
        int size;
};

struct pvrdma_ucontext {
        struct ib_ucontext ibucontext;
        struct pvrdma_dev *dev;
        struct pvrdma_uar_map uar;
        u64 ctx_handle;
};

struct pvrdma_pd {
        struct ib_pd ibpd;
        u32 pdn;
        u32 pd_handle;
        int privileged;
};

struct pvrdma_mr {
        u32 mr_handle;
        u64 iova;
        u64 size;
};

struct pvrdma_user_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct pvrdma_mr mmr;
        struct pvrdma_page_dir pdir;
        u64 *pages;
        u32 npages;
        u32 max_pages;
        u32 page_shift;
};

struct pvrdma_wq {
        struct pvrdma_ring *ring;
        spinlock_t lock; /* Work queue lock. */
        int wqe_cnt;
        int wqe_size;
        int max_sg;
        int offset;
};

struct pvrdma_ah {
        struct ib_ah ibah;
        struct pvrdma_av av;
};

struct pvrdma_srq {
        struct ib_srq ibsrq;
        int offset;
        spinlock_t lock; /* SRQ lock. */
        int wqe_cnt;
        int wqe_size;
        int max_gs;
        struct ib_umem *umem;
        struct pvrdma_ring_state *ring;
        struct pvrdma_page_dir pdir;
        u32 srq_handle;
        int npages;
        refcount_t refcnt;
        struct completion free;
};

struct pvrdma_qp {
        struct ib_qp ibqp;
        u32 qp_handle;
        u32 qkey;
        struct pvrdma_wq sq;
        struct pvrdma_wq rq;
        struct ib_umem *rumem;
        struct ib_umem *sumem;
        struct pvrdma_page_dir pdir;
        struct pvrdma_srq *srq;
        int npages;
        int npages_send;
        int npages_recv;
        u32 flags;
        u8 port;
        u8 state;
        bool is_kernel;
        struct mutex mutex; /* QP state mutex. */
        refcount_t refcnt;
        struct completion free;
};

struct pvrdma_dev {
        /* PCI device-related information. */
        struct ib_device ib_dev;
        struct pci_dev *pdev;
        void __iomem *regs;
        struct pvrdma_device_shared_region *dsr; /* Shared region pointer */
        dma_addr_t dsrbase; /* Shared region base address */
        void *cmd_slot;
        void *resp_slot;
        unsigned long flags;
        struct list_head device_link;
        unsigned int dsr_version;

        /* Locking and interrupt information. */
        spinlock_t cmd_lock; /* Command lock. */
        struct semaphore cmd_sema;
        struct completion cmd_done;
        unsigned int nr_vectors;

        /* RDMA-related device information. */
        union ib_gid *sgid_tbl;
        struct pvrdma_ring_state *async_ring_state;
        struct pvrdma_page_dir async_pdir;
        struct pvrdma_ring_state *cq_ring_state;
        struct pvrdma_page_dir cq_pdir;
        struct pvrdma_cq **cq_tbl;
        spinlock_t cq_tbl_lock;
        struct pvrdma_srq **srq_tbl;
        spinlock_t srq_tbl_lock;
        struct pvrdma_qp **qp_tbl;
        spinlock_t qp_tbl_lock;
        struct pvrdma_uar_table uar_table;
        struct pvrdma_uar_map driver_uar;
        __be64 sys_image_guid;
        spinlock_t desc_lock; /* Device modification lock. */
        u32 port_cap_mask;
        struct mutex port_mutex; /* Port modification mutex. */
        bool ib_active;
        atomic_t num_qps;
        atomic_t num_cqs;
        atomic_t num_srqs;
        atomic_t num_pds;
        atomic_t num_ahs;

        /* Network device information. */
        struct net_device *netdev;
        struct notifier_block nb_netdev;
};

struct pvrdma_netdevice_work {
        struct work_struct work;
        struct net_device *event_netdev;
        unsigned long event;
};

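/*
 * container_of() helpers mapping the core ib_* objects embedded in the
 * structures above back to their pvrdma wrappers.
 */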
static inline struct pvrdma_dev *to_vdev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct pvrdma_dev, ib_dev);
}

static inline struct
pvrdma_ucontext *to_vucontext(struct ib_ucontext *ibucontext)
{
        return container_of(ibucontext, struct pvrdma_ucontext, ibucontext);
}

static inline struct pvrdma_pd *to_vpd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct pvrdma_pd, ibpd);
}

static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct pvrdma_cq, ibcq);
}

static inline struct pvrdma_srq *to_vsrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct pvrdma_srq, ibsrq);
}

static inline struct pvrdma_user_mr *to_vmr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct pvrdma_user_mr, ibmr);
}

static inline struct pvrdma_qp *to_vqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct pvrdma_qp, ibqp);
}

static inline struct pvrdma_ah *to_vah(struct ib_ah *ibah)
{
        return container_of(ibah, struct pvrdma_ah, ibah);
}

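/*
 * Accessors for the device register region and the driver's UAR
 * doorbell page; values are converted to little-endian before being
 * written.
 */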
static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val)
{
        writel(cpu_to_le32(val), dev->regs + reg);
}

static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg)
{
        return le32_to_cpu(readl(dev->regs + reg));
}

static inline void pvrdma_write_uar_cq(struct pvrdma_dev *dev, u32 val)
{
        writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET);
}

static inline void pvrdma_write_uar_qp(struct pvrdma_dev *dev, u32 val)
{
        writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET);
}

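/*
 * Translate a byte offset within the virtually contiguous region backed
 * by a page directory into a kernel virtual address.
 */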
static inline void *pvrdma_page_dir_get_ptr(struct pvrdma_page_dir *pdir,
                                            u64 offset)
{
        return pdir->pages[offset / PAGE_SIZE] + (offset % PAGE_SIZE);
}

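/*
 * The pvrdma_* enums in the device ABI are defined to match their ib_*
 * counterparts value for value, so the conversions below are plain
 * casts; the flag conversions additionally mask off any bits beyond
 * those the device understands, via PVRDMA_MASK().
 */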
static inline enum pvrdma_mtu ib_mtu_to_pvrdma(enum ib_mtu mtu)
{
        return (enum pvrdma_mtu)mtu;
}

static inline enum ib_mtu pvrdma_mtu_to_ib(enum pvrdma_mtu mtu)
{
        return (enum ib_mtu)mtu;
}

static inline enum pvrdma_port_state ib_port_state_to_pvrdma(
                                        enum ib_port_state state)
{
        return (enum pvrdma_port_state)state;
}

static inline enum ib_port_state pvrdma_port_state_to_ib(
                                        enum pvrdma_port_state state)
{
        return (enum ib_port_state)state;
}

static inline int ib_port_cap_flags_to_pvrdma(int flags)
{
        return flags & PVRDMA_MASK(PVRDMA_PORT_CAP_FLAGS_MAX);
}

static inline int pvrdma_port_cap_flags_to_ib(int flags)
{
        return flags;
}

static inline enum pvrdma_port_width ib_port_width_to_pvrdma(
                                        enum ib_port_width width)
{
        return (enum pvrdma_port_width)width;
}

static inline enum ib_port_width pvrdma_port_width_to_ib(
                                        enum pvrdma_port_width width)
{
        return (enum ib_port_width)width;
}

static inline enum pvrdma_port_speed ib_port_speed_to_pvrdma(
                                        enum ib_port_speed speed)
{
        return (enum pvrdma_port_speed)speed;
}

static inline enum ib_port_speed pvrdma_port_speed_to_ib(
                                        enum pvrdma_port_speed speed)
{
        return (enum ib_port_speed)speed;
}

static inline int pvrdma_qp_attr_mask_to_ib(int attr_mask)
{
        return attr_mask;
}

static inline int ib_qp_attr_mask_to_pvrdma(int attr_mask)
{
        return attr_mask & PVRDMA_MASK(PVRDMA_QP_ATTR_MASK_MAX);
}

static inline enum pvrdma_mig_state ib_mig_state_to_pvrdma(
                                        enum ib_mig_state state)
{
        return (enum pvrdma_mig_state)state;
}

static inline enum ib_mig_state pvrdma_mig_state_to_ib(
                                        enum pvrdma_mig_state state)
{
        return (enum ib_mig_state)state;
}

static inline int ib_access_flags_to_pvrdma(int flags)
{
        return flags;
}

static inline int pvrdma_access_flags_to_ib(int flags)
{
        return flags & PVRDMA_MASK(PVRDMA_ACCESS_FLAGS_MAX);
}

static inline enum pvrdma_qp_type ib_qp_type_to_pvrdma(enum ib_qp_type type)
{
        return (enum pvrdma_qp_type)type;
}

static inline enum ib_qp_type pvrdma_qp_type_to_ib(enum pvrdma_qp_type type)
{
        return (enum ib_qp_type)type;
}

static inline enum pvrdma_qp_state ib_qp_state_to_pvrdma(enum ib_qp_state state)
{
        return (enum pvrdma_qp_state)state;
}

static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
{
        return (enum ib_qp_state)state;
}

static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
{
        return (enum pvrdma_wr_opcode)op;
}

static inline enum ib_wc_status pvrdma_wc_status_to_ib(
                                        enum pvrdma_wc_status status)
{
        return (enum ib_wc_status)status;
}

static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
{
        switch (opcode) {
        case PVRDMA_WC_SEND:
                return IB_WC_SEND;
        case PVRDMA_WC_RDMA_WRITE:
                return IB_WC_RDMA_WRITE;
        case PVRDMA_WC_RDMA_READ:
                return IB_WC_RDMA_READ;
        case PVRDMA_WC_COMP_SWAP:
                return IB_WC_COMP_SWAP;
        case PVRDMA_WC_FETCH_ADD:
                return IB_WC_FETCH_ADD;
        case PVRDMA_WC_LOCAL_INV:
                return IB_WC_LOCAL_INV;
        case PVRDMA_WC_FAST_REG_MR:
                return IB_WC_REG_MR;
        case PVRDMA_WC_MASKED_COMP_SWAP:
                return IB_WC_MASKED_COMP_SWAP;
        case PVRDMA_WC_MASKED_FETCH_ADD:
                return IB_WC_MASKED_FETCH_ADD;
        case PVRDMA_WC_RECV:
                return IB_WC_RECV;
        case PVRDMA_WC_RECV_RDMA_WITH_IMM:
                return IB_WC_RECV_RDMA_WITH_IMM;
        default:
                return IB_WC_SEND;
        }
}

static inline int pvrdma_wc_flags_to_ib(int flags)
{
        return flags;
}

static inline int ib_send_flags_to_pvrdma(int flags)
{
        return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
}

void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
                         const struct pvrdma_qp_cap *src);
void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
                         const struct ib_qp_cap *src);
void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src);
void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src);
void pvrdma_global_route_to_ib(struct ib_global_route *dst,
                               const struct pvrdma_global_route *src);
void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
                               const struct ib_global_route *src);
void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
                            const struct pvrdma_ah_attr *src);
void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
                            const struct rdma_ah_attr *src);
u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type);

int pvrdma_uar_table_init(struct pvrdma_dev *dev);
void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev);

int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);
void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar);

void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);

int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
                         u64 npages, bool alloc_pages);
void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
                             struct pvrdma_page_dir *pdir);
int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
                               dma_addr_t daddr);
int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
                                struct ib_umem *umem, u64 offset);
dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx);
int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
                                     u64 *page_list, int num_pages);

int pvrdma_cmd_post(struct pvrdma_dev *dev, union pvrdma_cmd_req *req,
                    union pvrdma_cmd_resp *rsp, unsigned resp_code);

#endif /* __PVRDMA_H__ */