linux/drivers/infiniband/hw/i40iw/i40iw_verbs.c
   1/*******************************************************************************
   2*
   3* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
   4*
   5* This software is available to you under a choice of one of two
   6* licenses.  You may choose to be licensed under the terms of the GNU
   7* General Public License (GPL) Version 2, available from the file
   8* COPYING in the main directory of this source tree, or the
   9* OpenFabrics.org BSD license below:
  10*
  11*   Redistribution and use in source and binary forms, with or
  12*   without modification, are permitted provided that the following
  13*   conditions are met:
  14*
  15*    - Redistributions of source code must retain the above
  16*       copyright notice, this list of conditions and the following
  17*       disclaimer.
  18*
  19*    - Redistributions in binary form must reproduce the above
  20*       copyright notice, this list of conditions and the following
  21*       disclaimer in the documentation and/or other materials
  22*       provided with the distribution.
  23*
  24* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31* SOFTWARE.
  32*
  33*******************************************************************************/
  34
  35#include <linux/module.h>
  36#include <linux/moduleparam.h>
  37#include <linux/random.h>
  38#include <linux/highmem.h>
  39#include <linux/time.h>
  40#include <linux/hugetlb.h>
  41#include <linux/irq.h>
  42#include <asm/byteorder.h>
  43#include <net/ip.h>
  44#include <rdma/ib_verbs.h>
  45#include <rdma/iw_cm.h>
  46#include <rdma/ib_user_verbs.h>
  47#include <rdma/ib_umem.h>
  48#include <rdma/uverbs_ioctl.h>
  49#include "i40iw.h"
  50
  51/**
  52 * i40iw_query_device - get device attributes
  53 * @ibdev: device pointer from stack
  54 * @props: returning device attributes
  55 * @udata: user data
  56 */
  57static int i40iw_query_device(struct ib_device *ibdev,
  58                              struct ib_device_attr *props,
  59                              struct ib_udata *udata)
  60{
  61        struct i40iw_device *iwdev = to_iwdev(ibdev);
  62
  63        if (udata->inlen || udata->outlen)
  64                return -EINVAL;
  65        memset(props, 0, sizeof(*props));
  66        ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
  67        props->fw_ver = I40IW_FW_VERSION;
  68        props->device_cap_flags = iwdev->device_cap_flags;
  69        props->vendor_id = iwdev->ldev->pcidev->vendor;
  70        props->vendor_part_id = iwdev->ldev->pcidev->device;
  71        props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
  72        props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
  73        props->max_qp = iwdev->max_qp - iwdev->used_qps;
  74        props->max_qp_wr = I40IW_MAX_QP_WRS;
  75        props->max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
  76        props->max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
  77        props->max_cq = iwdev->max_cq - iwdev->used_cqs;
  78        props->max_cqe = iwdev->max_cqe;
  79        props->max_mr = iwdev->max_mr - iwdev->used_mrs;
  80        props->max_pd = iwdev->max_pd - iwdev->used_pds;
  81        props->max_sge_rd = I40IW_MAX_SGE_RD;
  82        props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
  83        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
  84        props->atomic_cap = IB_ATOMIC_NONE;
  85        props->max_map_per_fmr = 1;
  86        props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
  87        return 0;
  88}
  89
  90/**
   91 * i40iw_query_port - get port attributes
  92 * @ibdev: device pointer from stack
  93 * @port: port number for query
   94 * @props: returning port attributes
  95 */
  96static int i40iw_query_port(struct ib_device *ibdev,
  97                            u8 port,
  98                            struct ib_port_attr *props)
  99{
 100        struct i40iw_device *iwdev = to_iwdev(ibdev);
 101        struct net_device *netdev = iwdev->netdev;
 102
 103        /* props being zeroed by the caller, avoid zeroing it here */
 104        props->max_mtu = IB_MTU_4096;
 105        props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 106
 107        props->lid = 1;
 108        if (netif_carrier_ok(iwdev->netdev))
 109                props->state = IB_PORT_ACTIVE;
 110        else
 111                props->state = IB_PORT_DOWN;
 112        props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
 113                IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
 114        props->gid_tbl_len = 1;
 115        props->pkey_tbl_len = 1;
 116        props->active_width = IB_WIDTH_4X;
 117        props->active_speed = 1;
 118        props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
 119        return 0;
 120}
 121
 122/**
 123 * i40iw_alloc_ucontext - Allocate the user context data structure
 124 * @uctx: Uverbs context pointer from stack
 125 * @udata: user data
 126 *
 127 * This keeps track of all objects associated with a particular
 128 * user-mode client.
 129 */
 130static int i40iw_alloc_ucontext(struct ib_ucontext *uctx,
 131                                struct ib_udata *udata)
 132{
 133        struct ib_device *ibdev = uctx->device;
 134        struct i40iw_device *iwdev = to_iwdev(ibdev);
 135        struct i40iw_alloc_ucontext_req req;
 136        struct i40iw_alloc_ucontext_resp uresp = {};
 137        struct i40iw_ucontext *ucontext = to_ucontext(uctx);
 138
 139        if (ib_copy_from_udata(&req, udata, sizeof(req)))
 140                return -EINVAL;
 141
 142        if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
 143                i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
 144                return -EINVAL;
 145        }
 146
 147        uresp.max_qps = iwdev->max_qp;
 148        uresp.max_pds = iwdev->max_pd;
 149        uresp.wq_size = iwdev->max_qp_wr * 2;
 150        uresp.kernel_ver = req.userspace_ver;
 151
 152        ucontext->iwdev = iwdev;
 153        ucontext->abi_ver = req.userspace_ver;
 154
 155        if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
 156                return -EFAULT;
 157
 158        INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
 159        spin_lock_init(&ucontext->cq_reg_mem_list_lock);
 160        INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
 161        spin_lock_init(&ucontext->qp_reg_mem_list_lock);
 162
 163        return 0;
 164}
 165
 166/**
 167 * i40iw_dealloc_ucontext - deallocate the user context data structure
 168 * @context: user context created during alloc
 169 */
 170static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
 171{
 172        return;
 173}
 174
 175/**
 176 * i40iw_mmap - user memory map
 177 * @context: context created during alloc
 178 * @vma: kernel info for user memory map
 179 */
 180static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 181{
 182        struct i40iw_ucontext *ucontext;
 183        u64 db_addr_offset;
 184        u64 push_offset;
 185
 186        ucontext = to_ucontext(context);
 187        if (ucontext->iwdev->sc_dev.is_pf) {
 188                db_addr_offset = I40IW_DB_ADDR_OFFSET;
 189                push_offset = I40IW_PUSH_OFFSET;
 190                if (vma->vm_pgoff)
 191                        vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
 192        } else {
 193                db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
 194                push_offset = I40IW_VF_PUSH_OFFSET;
 195                if (vma->vm_pgoff)
 196                        vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
 197        }
 198
 199        vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
 200
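             /* an unmodified pgoff of 0 selects the doorbell page (mapped
              * non-cached); any other offset selects a push page, mapped
              * alternately non-cached and write-combined
              */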
 201        if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
 202                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 203                vma->vm_private_data = ucontext;
 204        } else {
 205                if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
 206                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 207                else
 208                        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 209        }
 210
 211        if (io_remap_pfn_range(vma, vma->vm_start,
 212                               vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
 213                               PAGE_SIZE, vma->vm_page_prot))
 214                return -EAGAIN;
 215
 216        return 0;
 217}
 218
 219/**
 220 * i40iw_alloc_push_page - allocate a push page for qp
 221 * @iwdev: iwarp device
 222 * @qp: hardware control qp
 223 */
 224static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
 225{
 226        struct i40iw_cqp_request *cqp_request;
 227        struct cqp_commands_info *cqp_info;
 228        enum i40iw_status_code status;
 229
 230        if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
 231                return;
 232
 233        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
 234        if (!cqp_request)
 235                return;
 236
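             /* hold an extra reference so the completion result (op_ret_val)
              * can still be read once the CQP op has completed
              */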
 237        atomic_inc(&cqp_request->refcount);
 238
 239        cqp_info = &cqp_request->info;
 240        cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
 241        cqp_info->post_sq = 1;
 242
 243        cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
 244        cqp_info->in.u.manage_push_page.info.free_page = 0;
 245        cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
 246        cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
 247
 248        status = i40iw_handle_cqp_op(iwdev, cqp_request);
 249        if (!status)
 250                qp->push_idx = cqp_request->compl_info.op_ret_val;
 251        else
 252                i40iw_pr_err("CQP-OP Push page fail");
 253        i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
 254}
 255
 256/**
 257 * i40iw_dealloc_push_page - free a push page for qp
 258 * @iwdev: iwarp device
 259 * @qp: hardware control qp
 260 */
 261static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
 262{
 263        struct i40iw_cqp_request *cqp_request;
 264        struct cqp_commands_info *cqp_info;
 265        enum i40iw_status_code status;
 266
 267        if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
 268                return;
 269
 270        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
 271        if (!cqp_request)
 272                return;
 273
 274        cqp_info = &cqp_request->info;
 275        cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
 276        cqp_info->post_sq = 1;
 277
 278        cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
 279        cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
 280        cqp_info->in.u.manage_push_page.info.free_page = 1;
 281        cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
 282        cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
 283
 284        status = i40iw_handle_cqp_op(iwdev, cqp_request);
 285        if (!status)
 286                qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
 287        else
 288                i40iw_pr_err("CQP-OP Push page fail");
 289}
 290
 291/**
 292 * i40iw_alloc_pd - allocate protection domain
 293 * @pd: PD pointer
 294 * @udata: user data
 295 */
 296static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 297{
 298        struct i40iw_pd *iwpd = to_iwpd(pd);
 299        struct i40iw_device *iwdev = to_iwdev(pd->device);
 300        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 301        struct i40iw_alloc_pd_resp uresp;
 302        struct i40iw_sc_pd *sc_pd;
 303        u32 pd_id = 0;
 304        int err;
 305
 306        if (iwdev->closing)
 307                return -ENODEV;
 308
 309        err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
 310                                   iwdev->max_pd, &pd_id, &iwdev->next_pd);
 311        if (err) {
 312                i40iw_pr_err("alloc resource failed\n");
 313                return err;
 314        }
 315
 316        sc_pd = &iwpd->sc_pd;
 317
 318        if (udata) {
 319                struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
 320                        udata, struct i40iw_ucontext, ibucontext);
 321                dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
 322                memset(&uresp, 0, sizeof(uresp));
 323                uresp.pd_id = pd_id;
 324                if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
 325                        err = -EFAULT;
 326                        goto error;
 327                }
 328        } else {
 329                dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
 330        }
 331
 332        i40iw_add_pdusecount(iwpd);
 333        return 0;
 334
 335error:
 336        i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
 337        return err;
 338}
 339
 340/**
 341 * i40iw_dealloc_pd - deallocate pd
 342 * @ibpd: ptr of pd to be deallocated
 343 * @udata: user data or null for kernel object
 344 */
 345static void i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 346{
 347        struct i40iw_pd *iwpd = to_iwpd(ibpd);
 348        struct i40iw_device *iwdev = to_iwdev(ibpd->device);
 349
 350        i40iw_rem_pdusecount(iwpd, iwdev);
 351}
 352
 353/**
 354 * i40iw_get_pbl - Retrieve pbl from a list given a virtual
 355 * address
 356 * @va: user virtual address
  357 * @pbl_list: pbl list to search in (QP's or CQ's)
 *
 * The matching pbl is removed from the list before it is returned.
  358 */
 359static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
 360                                       struct list_head *pbl_list)
 361{
 362        struct i40iw_pbl *iwpbl;
 363
 364        list_for_each_entry(iwpbl, pbl_list, list) {
 365                if (iwpbl->user_base == va) {
 366                        iwpbl->on_list = false;
 367                        list_del(&iwpbl->list);
 368                        return iwpbl;
 369                }
 370        }
 371        return NULL;
 372}
 373
 374/**
 375 * i40iw_free_qp_resources - free up memory resources for qp
 376 * @iwdev: iwarp device
 377 * @iwqp: qp ptr (user or kernel)
 378 * @qp_num: qp number assigned
 379 */
 380void i40iw_free_qp_resources(struct i40iw_device *iwdev,
 381                             struct i40iw_qp *iwqp,
 382                             u32 qp_num)
 383{
 384        struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
 385
 386        i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
 387        i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
 388        if (qp_num)
 389                i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
 390        if (iwpbl->pbl_allocated)
 391                i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
 392        i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
 393        i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
 394        kfree(iwqp->kqp.wrid_mem);
 395        iwqp->kqp.wrid_mem = NULL;
 396        kfree(iwqp->allocated_buffer);
 397}
 398
 399/**
 400 * i40iw_clean_cqes - clean cq entries for qp
 401 * @iwqp: qp ptr (user or kernel)
 402 * @iwcq: cq ptr
 403 */
 404static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
 405{
 406        struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
 407
 408        ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
 409}
 410
 411/**
 412 * i40iw_destroy_qp - destroy qp
  413 * @ibqp: qp's ib pointer also to get to device's qp address
 * @udata: user data or NULL for kernel object
  414 */
 415static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 416{
 417        struct i40iw_qp *iwqp = to_iwqp(ibqp);
 418
 419        iwqp->destroyed = 1;
 420
 421        if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
 422                i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);
 423
 424        if (!iwqp->user_mode) {
 425                if (iwqp->iwscq) {
 426                        i40iw_clean_cqes(iwqp, iwqp->iwscq);
 427                        if (iwqp->iwrcq != iwqp->iwscq)
 428                                i40iw_clean_cqes(iwqp, iwqp->iwrcq);
 429                }
 430        }
 431
 432        i40iw_rem_ref(&iwqp->ibqp);
 433        return 0;
 434}
 435
 436/**
 437 * i40iw_setup_virt_qp - setup for allocation of virtual qp
  438 * @iwdev: iwarp device
  439 * @iwqp: qp ptr
 440 * @init_info: initialize info to return
 441 */
 442static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
 443                               struct i40iw_qp *iwqp,
 444                               struct i40iw_qp_init_info *init_info)
 445{
 446        struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
 447        struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
 448
 449        iwqp->page = qpmr->sq_page;
 450        init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
 451        if (iwpbl->pbl_allocated) {
 452                init_info->virtual_map = true;
 453                init_info->sq_pa = qpmr->sq_pbl.idx;
 454                init_info->rq_pa = qpmr->rq_pbl.idx;
 455        } else {
 456                init_info->sq_pa = qpmr->sq_pbl.addr;
 457                init_info->rq_pa = qpmr->rq_pbl.addr;
 458        }
 459        return 0;
 460}
 461
 462/**
 463 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
 464 * @iwdev: iwarp device
 465 * @iwqp: qp ptr (user or kernel)
 466 * @info: initialize info to return
 467 */
 468static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
 469                                struct i40iw_qp *iwqp,
 470                                struct i40iw_qp_init_info *info)
 471{
 472        struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
 473        u32 sqdepth, rqdepth;
 474        u8 sqshift;
 475        u32 size;
 476        enum i40iw_status_code status;
 477        struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
 478
 479        i40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
 480        status = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth);
 481        if (status)
 482                return -ENOMEM;
 483
 484        status = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth);
 485        if (status)
 486                return -ENOMEM;
 487
 488        size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
 489        iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
 490
 491        ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
 492        if (!ukinfo->sq_wrtrk_array)
 493                return -ENOMEM;
 494
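             /* the RQ wrid array (one u64 per RQ WQE) sits right after the
              * SQ tracking entries in the same allocation
              */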
 495        ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
 496
 497        size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
 498        size += (I40IW_SHADOW_AREA_SIZE << 3);
 499
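             /* one DMA buffer holds the SQ WQEs, then the RQ WQEs, then the
              * shadow area
              */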
 500        status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
 501        if (status) {
 502                kfree(ukinfo->sq_wrtrk_array);
 503                ukinfo->sq_wrtrk_array = NULL;
 504                return -ENOMEM;
 505        }
 506
 507        ukinfo->sq = mem->va;
 508        info->sq_pa = mem->pa;
 509
 510        ukinfo->rq = &ukinfo->sq[sqdepth];
 511        info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);
 512
 513        ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
 514        info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
 515
 516        ukinfo->sq_size = sqdepth >> sqshift;
 517        ukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT;
 518        ukinfo->qp_id = iwqp->ibqp.qp_num;
 519        return 0;
 520}
 521
 522/**
 523 * i40iw_create_qp - create qp
 524 * @ibpd: ptr of pd
 525 * @init_attr: attributes for qp
 526 * @udata: user data for create qp
 527 */
 528static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 529                                     struct ib_qp_init_attr *init_attr,
 530                                     struct ib_udata *udata)
 531{
 532        struct i40iw_pd *iwpd = to_iwpd(ibpd);
 533        struct i40iw_device *iwdev = to_iwdev(ibpd->device);
 534        struct i40iw_cqp *iwcqp = &iwdev->cqp;
 535        struct i40iw_qp *iwqp;
 536        struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
 537                udata, struct i40iw_ucontext, ibucontext);
 538        struct i40iw_create_qp_req req;
 539        struct i40iw_create_qp_resp uresp;
 540        u32 qp_num = 0;
 541        void *mem;
 542        enum i40iw_status_code ret;
 543        int err_code;
 544        int sq_size;
 545        int rq_size;
 546        struct i40iw_sc_qp *qp;
 547        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 548        struct i40iw_qp_init_info init_info;
 549        struct i40iw_create_qp_info *qp_info;
 550        struct i40iw_cqp_request *cqp_request;
 551        struct cqp_commands_info *cqp_info;
 552
 553        struct i40iw_qp_host_ctx_info *ctx_info;
 554        struct i40iwarp_offload_info *iwarp_info;
 555        unsigned long flags;
 556
 557        if (iwdev->closing)
 558                return ERR_PTR(-ENODEV);
 559
 560        if (init_attr->create_flags)
 561                return ERR_PTR(-EINVAL);
 562        if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
 563                init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
 564
 565        if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
 566                init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
 567
 568        if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
 569                init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
 570
 571        memset(&init_info, 0, sizeof(init_info));
 572
 573        sq_size = init_attr->cap.max_send_wr;
 574        rq_size = init_attr->cap.max_recv_wr;
 575
 576        init_info.vsi = &iwdev->vsi;
 577        init_info.qp_uk_init_info.sq_size = sq_size;
 578        init_info.qp_uk_init_info.rq_size = rq_size;
 579        init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
 580        init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
 581        init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
 582
 583        mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
 584        if (!mem)
 585                return ERR_PTR(-ENOMEM);
 586
 587        iwqp = (struct i40iw_qp *)mem;
 588        iwqp->allocated_buffer = mem;
 589        qp = &iwqp->sc_qp;
 590        qp->back_qp = (void *)iwqp;
 591        qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
 592
 593        iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
 594
 595        if (i40iw_allocate_dma_mem(dev->hw,
 596                                   &iwqp->q2_ctx_mem,
 597                                   I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
 598                                   256)) {
 599                i40iw_pr_err("dma_mem failed\n");
 600                err_code = -ENOMEM;
 601                goto error;
 602        }
 603
 604        init_info.q2 = iwqp->q2_ctx_mem.va;
 605        init_info.q2_pa = iwqp->q2_ctx_mem.pa;
 606
 607        init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
 608        init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;
 609
 610        err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
 611                                        &qp_num, &iwdev->next_qp);
 612        if (err_code) {
 613                i40iw_pr_err("qp resource\n");
 614                goto error;
 615        }
 616
 617        iwqp->iwdev = iwdev;
 618        iwqp->iwpd = iwpd;
 619        iwqp->ibqp.qp_num = qp_num;
 620        qp = &iwqp->sc_qp;
 621        iwqp->iwscq = to_iwcq(init_attr->send_cq);
 622        iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
 623
 624        iwqp->host_ctx.va = init_info.host_ctx;
 625        iwqp->host_ctx.pa = init_info.host_ctx_pa;
 626        iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;
 627
 628        init_info.pd = &iwpd->sc_pd;
 629        init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
 630        iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
 631
 632        if (init_attr->qp_type != IB_QPT_RC) {
 633                err_code = -EINVAL;
 634                goto error;
 635        }
 636        if (iwdev->push_mode)
 637                i40iw_alloc_push_page(iwdev, qp);
 638        if (udata) {
 639                err_code = ib_copy_from_udata(&req, udata, sizeof(req));
 640                if (err_code) {
  641                        i40iw_pr_err("ib_copy_from_udata failed\n");
 642                        goto error;
 643                }
 644                iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
 645                iwqp->user_mode = 1;
 646
 647                if (req.user_wqe_buffers) {
 648                        struct i40iw_pbl *iwpbl;
 649
 650                        spin_lock_irqsave(
 651                            &ucontext->qp_reg_mem_list_lock, flags);
 652                        iwpbl = i40iw_get_pbl(
 653                            (unsigned long)req.user_wqe_buffers,
 654                            &ucontext->qp_reg_mem_list);
 655                        spin_unlock_irqrestore(
 656                            &ucontext->qp_reg_mem_list_lock, flags);
 657
 658                        if (!iwpbl) {
 659                                err_code = -ENODATA;
 660                                i40iw_pr_err("no pbl info\n");
 661                                goto error;
 662                        }
 663                        memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
 664                }
 665                err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
 666        } else {
 667                err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
 668        }
 669
 670        if (err_code) {
 671                i40iw_pr_err("setup qp failed\n");
 672                goto error;
 673        }
 674
 675        init_info.type = I40IW_QP_TYPE_IWARP;
 676        ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
 677        if (ret) {
 678                err_code = -EPROTO;
 679                i40iw_pr_err("qp_init fail\n");
 680                goto error;
 681        }
 682        ctx_info = &iwqp->ctx_info;
 683        iwarp_info = &iwqp->iwarp_info;
 684        iwarp_info->rd_enable = true;
 685        iwarp_info->wr_rdresp_en = true;
 686        if (!iwqp->user_mode) {
 687                iwarp_info->fast_reg_en = true;
 688                iwarp_info->priv_mode_en = true;
 689        }
 690        iwarp_info->ddp_ver = 1;
 691        iwarp_info->rdmap_ver = 1;
 692
 693        ctx_info->iwarp_info_valid = true;
 694        ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
 695        ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
 696        if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
 697                ctx_info->push_mode_en = false;
 698        } else {
 699                ctx_info->push_mode_en = true;
 700                ctx_info->push_idx = qp->push_idx;
 701        }
 702
 703        ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
 704                                             (u64 *)iwqp->host_ctx.va,
 705                                             ctx_info);
 706        ctx_info->iwarp_info_valid = false;
 707        cqp_request = i40iw_get_cqp_request(iwcqp, true);
 708        if (!cqp_request) {
 709                err_code = -ENOMEM;
 710                goto error;
 711        }
 712        cqp_info = &cqp_request->info;
 713        qp_info = &cqp_request->info.in.u.qp_create.info;
 714
 715        memset(qp_info, 0, sizeof(*qp_info));
 716
 717        qp_info->cq_num_valid = true;
 718        qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;
 719
 720        cqp_info->cqp_cmd = OP_QP_CREATE;
 721        cqp_info->post_sq = 1;
 722        cqp_info->in.u.qp_create.qp = qp;
 723        cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
 724        ret = i40iw_handle_cqp_op(iwdev, cqp_request);
 725        if (ret) {
 726                i40iw_pr_err("CQP-OP QP create fail");
 727                err_code = -EACCES;
 728                goto error;
 729        }
 730
 731        i40iw_add_ref(&iwqp->ibqp);
 732        spin_lock_init(&iwqp->lock);
 733        iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
 734        iwdev->qp_table[qp_num] = iwqp;
 735        i40iw_add_pdusecount(iwqp->iwpd);
 736        i40iw_add_devusecount(iwdev);
 737        if (udata) {
 738                memset(&uresp, 0, sizeof(uresp));
 739                uresp.actual_sq_size = sq_size;
 740                uresp.actual_rq_size = rq_size;
 741                uresp.qp_id = qp_num;
 742                uresp.push_idx = qp->push_idx;
 743                err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 744                if (err_code) {
 745                        i40iw_pr_err("copy_to_udata failed\n");
 746                        i40iw_destroy_qp(&iwqp->ibqp, udata);
 747                        /* let the completion of the qp destroy free the qp */
 748                        return ERR_PTR(err_code);
 749                }
 750        }
 751        init_completion(&iwqp->sq_drained);
 752        init_completion(&iwqp->rq_drained);
 753
 754        return &iwqp->ibqp;
 755error:
 756        i40iw_free_qp_resources(iwdev, iwqp, qp_num);
 757        return ERR_PTR(err_code);
 758}
 759
 760/**
  761 * i40iw_query_qp - query qp attributes
 762 * @ibqp: qp pointer
 763 * @attr: attributes pointer
 764 * @attr_mask: Not used
 765 * @init_attr: qp attributes to return
 766 */
 767static int i40iw_query_qp(struct ib_qp *ibqp,
 768                          struct ib_qp_attr *attr,
 769                          int attr_mask,
 770                          struct ib_qp_init_attr *init_attr)
 771{
 772        struct i40iw_qp *iwqp = to_iwqp(ibqp);
 773        struct i40iw_sc_qp *qp = &iwqp->sc_qp;
 774
 775        attr->qp_state = iwqp->ibqp_state;
 776        attr->cur_qp_state = attr->qp_state;
 777        attr->qp_access_flags = 0;
 778        attr->cap.max_send_wr = qp->qp_uk.sq_size;
 779        attr->cap.max_recv_wr = qp->qp_uk.rq_size;
 780        attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
 781        attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
 782        attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
 783        attr->port_num = 1;
 784        init_attr->event_handler = iwqp->ibqp.event_handler;
 785        init_attr->qp_context = iwqp->ibqp.qp_context;
 786        init_attr->send_cq = iwqp->ibqp.send_cq;
 787        init_attr->recv_cq = iwqp->ibqp.recv_cq;
 788        init_attr->srq = iwqp->ibqp.srq;
 789        init_attr->cap = attr->cap;
 790        init_attr->port_num = 1;
 791        return 0;
 792}
 793
 794/**
 795 * i40iw_hw_modify_qp - setup cqp for modify qp
 796 * @iwdev: iwarp device
 797 * @iwqp: qp ptr (user or kernel)
 798 * @info: info for modify qp
 799 * @wait: flag to wait or not for modify qp completion
 800 */
 801void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
 802                        struct i40iw_modify_qp_info *info, bool wait)
 803{
 804        struct i40iw_cqp_request *cqp_request;
 805        struct cqp_commands_info *cqp_info;
 806        struct i40iw_modify_qp_info *m_info;
 807        struct i40iw_gen_ae_info ae_info;
 808
 809        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
 810        if (!cqp_request)
 811                return;
 812
 813        cqp_info = &cqp_request->info;
 814        m_info = &cqp_info->in.u.qp_modify.info;
 815        memcpy(m_info, info, sizeof(*m_info));
 816        cqp_info->cqp_cmd = OP_QP_MODIFY;
 817        cqp_info->post_sq = 1;
 818        cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
 819        cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
 820        if (!i40iw_handle_cqp_op(iwdev, cqp_request))
 821                return;
 822
 823        switch (m_info->next_iwarp_state) {
 824        case I40IW_QP_STATE_RTS:
 825                if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)
 826                        i40iw_send_reset(iwqp->cm_node);
 827                /* fall through */
 828        case I40IW_QP_STATE_IDLE:
 829        case I40IW_QP_STATE_TERMINATE:
 830        case I40IW_QP_STATE_CLOSING:
 831                ae_info.ae_code = I40IW_AE_BAD_CLOSE;
 832                ae_info.ae_source = 0;
 833                i40iw_gen_ae(iwdev, &iwqp->sc_qp, &ae_info, false);
 834                break;
 835        case I40IW_QP_STATE_ERROR:
 836        default:
 837                break;
 838        }
 839}
 840
 841/**
 842 * i40iw_modify_qp - modify qp request
 843 * @ibqp: qp's pointer for modify
 844 * @attr: access attributes
 845 * @attr_mask: state mask
 846 * @udata: user data
 847 */
 848int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 849                    int attr_mask, struct ib_udata *udata)
 850{
 851        struct i40iw_qp *iwqp = to_iwqp(ibqp);
 852        struct i40iw_device *iwdev = iwqp->iwdev;
 853        struct i40iw_qp_host_ctx_info *ctx_info;
 854        struct i40iwarp_offload_info *iwarp_info;
 855        struct i40iw_modify_qp_info info;
 856        u8 issue_modify_qp = 0;
 857        u8 dont_wait = 0;
 858        u32 err;
 859        unsigned long flags;
 860
 861        memset(&info, 0, sizeof(info));
 862        ctx_info = &iwqp->ctx_info;
 863        iwarp_info = &iwqp->iwarp_info;
 864
 865        spin_lock_irqsave(&iwqp->lock, flags);
 866
 867        if (attr_mask & IB_QP_STATE) {
 868                if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
 869                        err = -EINVAL;
 870                        goto exit;
 871                }
 872
 873                switch (attr->qp_state) {
 874                case IB_QPS_INIT:
 875                case IB_QPS_RTR:
 876                        if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
 877                                err = -EINVAL;
 878                                goto exit;
 879                        }
 880                        if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
 881                                info.next_iwarp_state = I40IW_QP_STATE_IDLE;
 882                                issue_modify_qp = 1;
 883                        }
 884                        break;
 885                case IB_QPS_RTS:
 886                        if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
 887                            (!iwqp->cm_id)) {
 888                                err = -EINVAL;
 889                                goto exit;
 890                        }
 891
 892                        issue_modify_qp = 1;
 893                        iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
 894                        iwqp->hte_added = 1;
 895                        info.next_iwarp_state = I40IW_QP_STATE_RTS;
 896                        info.tcp_ctx_valid = true;
 897                        info.ord_valid = true;
 898                        info.arp_cache_idx_valid = true;
 899                        info.cq_num_valid = true;
 900                        break;
 901                case IB_QPS_SQD:
 902                        if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
 903                                err = 0;
 904                                goto exit;
 905                        }
 906                        if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
 907                            (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
 908                                err = 0;
 909                                goto exit;
 910                        }
 911                        if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
 912                                err = -EINVAL;
 913                                goto exit;
 914                        }
 915                        info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
 916                        issue_modify_qp = 1;
 917                        break;
 918                case IB_QPS_SQE:
 919                        if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
 920                                err = -EINVAL;
 921                                goto exit;
 922                        }
 923                        info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
 924                        issue_modify_qp = 1;
 925                        break;
 926                case IB_QPS_ERR:
 927                case IB_QPS_RESET:
 928                        if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
 929                                err = -EINVAL;
 930                                goto exit;
 931                        }
 932                        if (iwqp->sc_qp.term_flags)
 933                                i40iw_terminate_del_timer(&iwqp->sc_qp);
 934                        info.next_iwarp_state = I40IW_QP_STATE_ERROR;
 935                        if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
 936                            iwdev->iw_status &&
 937                            (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
 938                                info.reset_tcp_conn = true;
 939                        else
 940                                dont_wait = 1;
 941                        issue_modify_qp = 1;
 942                        info.next_iwarp_state = I40IW_QP_STATE_ERROR;
 943                        break;
 944                default:
 945                        err = -EINVAL;
 946                        goto exit;
 947                }
 948
 949                iwqp->ibqp_state = attr->qp_state;
 950
 951        }
 952        if (attr_mask & IB_QP_ACCESS_FLAGS) {
 953                ctx_info->iwarp_info_valid = true;
 954                if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
 955                        iwarp_info->wr_rdresp_en = true;
 956                if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
 957                        iwarp_info->wr_rdresp_en = true;
 958                if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
 959                        iwarp_info->rd_enable = true;
 960                if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
 961                        iwarp_info->bind_en = true;
 962
 963                if (iwqp->user_mode) {
 964                        iwarp_info->rd_enable = true;
 965                        iwarp_info->wr_rdresp_en = true;
 966                        iwarp_info->priv_mode_en = false;
 967                }
 968        }
 969
 970        if (ctx_info->iwarp_info_valid) {
 971                struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 972                int ret;
 973
 974                ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
 975                ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
 976                ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
 977                                                     (u64 *)iwqp->host_ctx.va,
 978                                                     ctx_info);
 979                if (ret) {
 980                        i40iw_pr_err("setting QP context\n");
 981                        err = -EINVAL;
 982                        goto exit;
 983                }
 984        }
 985
 986        spin_unlock_irqrestore(&iwqp->lock, flags);
 987
 988        if (issue_modify_qp) {
 989                i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
 990
 991                spin_lock_irqsave(&iwqp->lock, flags);
 992                iwqp->iwarp_state = info.next_iwarp_state;
 993                spin_unlock_irqrestore(&iwqp->lock, flags);
 994        }
 995
 996        if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
 997                if (dont_wait) {
 998                        if (iwqp->cm_id && iwqp->hw_tcp_state) {
 999                                spin_lock_irqsave(&iwqp->lock, flags);
1000                                iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
1001                                iwqp->last_aeq = I40IW_AE_RESET_SENT;
1002                                spin_unlock_irqrestore(&iwqp->lock, flags);
1003                                i40iw_cm_disconn(iwqp);
1004                        }
1005                } else {
1006                        spin_lock_irqsave(&iwqp->lock, flags);
1007                        if (iwqp->cm_id) {
1008                                if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
1009                                        iwqp->cm_id->add_ref(iwqp->cm_id);
1010                                        i40iw_schedule_cm_timer(iwqp->cm_node,
1011                                                                (struct i40iw_puda_buf *)iwqp,
1012                                                                 I40IW_TIMER_TYPE_CLOSE, 1, 0);
1013                                }
1014                        }
1015                        spin_unlock_irqrestore(&iwqp->lock, flags);
1016                }
1017        }
1018        return 0;
1019exit:
1020        spin_unlock_irqrestore(&iwqp->lock, flags);
1021        return err;
1022}
1023
1024/**
 1025 * cq_free_resources - free up resources for cq
1026 * @iwdev: iwarp device
1027 * @iwcq: cq ptr
1028 */
1029static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
1030{
1031        struct i40iw_sc_cq *cq = &iwcq->sc_cq;
1032
1033        if (!iwcq->user_mode)
1034                i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
1035        i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
1036}
1037
1038/**
1039 * i40iw_cq_wq_destroy - send cq destroy cqp
1040 * @iwdev: iwarp device
1041 * @cq: hardware control cq
1042 */
1043void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
1044{
1045        enum i40iw_status_code status;
1046        struct i40iw_cqp_request *cqp_request;
1047        struct cqp_commands_info *cqp_info;
1048
1049        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1050        if (!cqp_request)
1051                return;
1052
1053        cqp_info = &cqp_request->info;
1054
1055        cqp_info->cqp_cmd = OP_CQ_DESTROY;
1056        cqp_info->post_sq = 1;
1057        cqp_info->in.u.cq_destroy.cq = cq;
1058        cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
1059        status = i40iw_handle_cqp_op(iwdev, cqp_request);
1060        if (status)
 1061                i40iw_pr_err("CQP-OP Destroy CQ fail");
1062}
1063
1064/**
1065 * i40iw_destroy_cq - destroy cq
1066 * @ib_cq: cq pointer
1067 * @udata: user data or NULL for kernel object
1068 */
1069static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1070{
1071        struct i40iw_cq *iwcq;
1072        struct i40iw_device *iwdev;
1073        struct i40iw_sc_cq *cq;
1074
1075        iwcq = to_iwcq(ib_cq);
1076        iwdev = to_iwdev(ib_cq->device);
1077        cq = &iwcq->sc_cq;
1078        i40iw_cq_wq_destroy(iwdev, cq);
1079        cq_free_resources(iwdev, iwcq);
1080        i40iw_rem_devusecount(iwdev);
1081}
1082
1083/**
1084 * i40iw_create_cq - create cq
1085 * @ibcq: CQ allocated
1086 * @attr: attributes for cq
1087 * @udata: user data
1088 */
1089static int i40iw_create_cq(struct ib_cq *ibcq,
1090                           const struct ib_cq_init_attr *attr,
1091                           struct ib_udata *udata)
1092{
1093        struct ib_device *ibdev = ibcq->device;
1094        struct i40iw_device *iwdev = to_iwdev(ibdev);
1095        struct i40iw_cq *iwcq = to_iwcq(ibcq);
1096        struct i40iw_pbl *iwpbl;
1097        u32 cq_num = 0;
1098        struct i40iw_sc_cq *cq;
1099        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1100        struct i40iw_cq_init_info info = {};
1101        enum i40iw_status_code status;
1102        struct i40iw_cqp_request *cqp_request;
1103        struct cqp_commands_info *cqp_info;
1104        struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1105        unsigned long flags;
1106        int err_code;
1107        int entries = attr->cqe;
1108
1109        if (iwdev->closing)
1110                return -ENODEV;
1111
1112        if (entries > iwdev->max_cqe)
1113                return -EINVAL;
1114
1115        err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
1116                                        iwdev->max_cq, &cq_num,
1117                                        &iwdev->next_cq);
1118        if (err_code)
1119                return err_code;
1120
1121        cq = &iwcq->sc_cq;
1122        cq->back_cq = (void *)iwcq;
1123        spin_lock_init(&iwcq->lock);
1124
1125        info.dev = dev;
1126        ukinfo->cq_size = max(entries, 4);
1127        ukinfo->cq_id = cq_num;
1128        iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1129        info.ceqe_mask = 0;
1130        if (attr->comp_vector < iwdev->ceqs_count)
1131                info.ceq_id = attr->comp_vector;
1132        info.ceq_id_valid = true;
1133        info.ceqe_mask = 1;
1134        info.type = I40IW_CQ_TYPE_IWARP;
1135        if (udata) {
1136                struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
1137                        udata, struct i40iw_ucontext, ibucontext);
1138                struct i40iw_create_cq_req req;
1139                struct i40iw_cq_mr *cqmr;
1140
1141                memset(&req, 0, sizeof(req));
1142                iwcq->user_mode = true;
1143                if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
1144                        err_code = -EFAULT;
1145                        goto cq_free_resources;
1146                }
1147
1148                spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1149                iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
1150                                      &ucontext->cq_reg_mem_list);
1151                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1152                if (!iwpbl) {
1153                        err_code = -EPROTO;
1154                        goto cq_free_resources;
1155                }
1156
1157                iwcq->iwpbl = iwpbl;
1158                iwcq->cq_mem_size = 0;
1159                cqmr = &iwpbl->cq_mr;
1160                info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
1161                if (iwpbl->pbl_allocated) {
1162                        info.virtual_map = true;
1163                        info.pbl_chunk_size = 1;
1164                        info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
1165                } else {
1166                        info.cq_base_pa = cqmr->cq_pbl.addr;
1167                }
1168        } else {
1169                /* Kmode allocations */
1170                int rsize;
1171                int shadow;
1172
1173                rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
1174                rsize = round_up(rsize, 256);
1175                shadow = I40IW_SHADOW_AREA_SIZE << 3;
1176                status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
1177                                                rsize + shadow, 256);
1178                if (status) {
1179                        err_code = -ENOMEM;
1180                        goto cq_free_resources;
1181                }
1182                ukinfo->cq_base = iwcq->kmem.va;
1183                info.cq_base_pa = iwcq->kmem.pa;
1184                info.shadow_area_pa = info.cq_base_pa + rsize;
1185                ukinfo->shadow_area = iwcq->kmem.va + rsize;
1186        }
1187
1188        if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
1189                i40iw_pr_err("init cq fail\n");
1190                err_code = -EPROTO;
1191                goto cq_free_resources;
1192        }
1193
1194        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1195        if (!cqp_request) {
1196                err_code = -ENOMEM;
1197                goto cq_free_resources;
1198        }
1199
1200        cqp_info = &cqp_request->info;
1201        cqp_info->cqp_cmd = OP_CQ_CREATE;
1202        cqp_info->post_sq = 1;
1203        cqp_info->in.u.cq_create.cq = cq;
1204        cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1205        status = i40iw_handle_cqp_op(iwdev, cqp_request);
1206        if (status) {
 1207                i40iw_pr_err("CQP-OP Create CQ fail");
1208                err_code = -EPROTO;
1209                goto cq_free_resources;
1210        }
1211
1212        if (udata) {
1213                struct i40iw_create_cq_resp resp;
1214
1215                memset(&resp, 0, sizeof(resp));
1216                resp.cq_id = info.cq_uk_init_info.cq_id;
1217                resp.cq_size = info.cq_uk_init_info.cq_size;
1218                if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
1219                        i40iw_pr_err("copy to user data\n");
1220                        err_code = -EPROTO;
1221                        goto cq_destroy;
1222                }
1223        }
1224
1225        i40iw_add_devusecount(iwdev);
1226        return 0;
1227
1228cq_destroy:
1229        i40iw_cq_wq_destroy(iwdev, cq);
1230cq_free_resources:
1231        cq_free_resources(iwdev, iwcq);
1232        return err_code;
1233}
1234
1235/**
1236 * i40iw_get_user_access - get hw access from IB access
1237 * @acc: IB access to return hw access
1238 */
1239static inline u16 i40iw_get_user_access(int acc)
1240{
1241        u16 access = 0;
1242
1243        access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
1244        access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
1245        access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
1246        access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
1247        return access;
1248}
1249
1250/**
1251 * i40iw_free_stag - free stag resource
1252 * @iwdev: iwarp device
1253 * @stag: stag to free
1254 */
1255static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
1256{
1257        u32 stag_idx;
1258
1259        stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1260        i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
1261        i40iw_rem_devusecount(iwdev);
1262}
1263
1264/**
1265 * i40iw_create_stag - create random stag
1266 * @iwdev: iwarp device
1267 */
1268static u32 i40iw_create_stag(struct i40iw_device *iwdev)
1269{
1270        u32 stag = 0;
1271        u32 stag_index = 0;
1272        u32 next_stag_index;
1273        u32 driver_key;
1274        u32 random;
1275        u8 consumer_key;
1276        int ret;
1277
1278        get_random_bytes(&random, sizeof(random));
1279        consumer_key = (u8)random;
1280
1281        driver_key = random & ~iwdev->mr_stagmask;
1282        next_stag_index = (random & iwdev->mr_stagmask) >> 8;
1283        next_stag_index %= iwdev->max_mr;
1284
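             /* the stag is built from the allocated resource index, the
              * random driver key bits outside the stag mask and the 8-bit
              * consumer key
              */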
1285        ret = i40iw_alloc_resource(iwdev,
1286                                   iwdev->allocated_mrs, iwdev->max_mr,
1287                                   &stag_index, &next_stag_index);
1288        if (!ret) {
1289                stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
1290                stag |= driver_key;
1291                stag += (u32)consumer_key;
1292                i40iw_add_devusecount(iwdev);
1293        }
1294        return stag;
1295}
1296
1297/**
1298 * i40iw_next_pbl_addr - Get next pbl address
1299 * @pbl: pointer to a pble
1300 * @pinfo: info pointer
1301 * @idx: index
1302 */
1303static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
1304                                       struct i40iw_pble_info **pinfo,
1305                                       u32 *idx)
1306{
1307        *idx += 1;
1308        if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
1309                return ++pbl;
1310        *idx = 0;
1311        (*pinfo)++;
1312        return (u64 *)(*pinfo)->addr;
1313}
1314
1315/**
 1316 * i40iw_copy_user_pgaddrs - copy user page addresses to pbles locally
 1317 * @iwmr: iwmr for IB's user page addresses
 1318 * @pbl: pbl pointer to save 1 level or 0 level pble
1319 * @level: indicated level 0, 1 or 2
1320 */
1321static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
1322                                    u64 *pbl,
1323                                    enum i40iw_pble_level level)
1324{
1325        struct ib_umem *region = iwmr->region;
1326        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1327        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1328        struct i40iw_pble_info *pinfo;
1329        struct ib_block_iter biter;
1330        u32 idx = 0;
1331
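             /* a level-1 PBL is one contiguous chunk; for level-2, walk the
              * leaf array as addresses are copied
              */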
1332        pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
1333
1334        if (iwmr->type == IW_MEMREG_TYPE_QP)
1335                iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
1336
1337        rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
1338                            iwmr->page_size) {
1339                *pbl = rdma_block_iter_dma_address(&biter);
1340                pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
1341        }
1342}
1343
1344/**
1345 * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
1346 * @arr: lvl1 pbl array
1347 * @npages: page count
 1348 * @pg_size: page size
1350 */
1351static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
1352{
1353        u32 pg_idx;
1354
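             /* contiguous when every entry equals the first address plus its
              * page offset
              */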
1355        for (pg_idx = 0; pg_idx < npages; pg_idx++) {
1356                if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
1357                        return false;
1358        }
1359        return true;
1360}
1361
1362/**
1363 * i40iw_check_mr_contiguous - check if MR is physically contiguous
1364 * @palloc: pbl allocation struct
 1365 * @pg_size: page size
1366 */
1367static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
1368{
1369        struct i40iw_pble_level2 *lvl2 = &palloc->level2;
1370        struct i40iw_pble_info *leaf = lvl2->leaf;
1371        u64 *arr = NULL;
1372        u64 *start_addr = NULL;
1373        int i;
1374        bool ret;
1375
1376        if (palloc->level == I40IW_LEVEL_1) {
1377                arr = (u64 *)palloc->level1.addr;
1378                ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
1379                return ret;
1380        }
1381
1382        start_addr = (u64 *)leaf->addr;
1383
1384        for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
1385                arr = (u64 *)leaf->addr;
1386                if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
1387                        return false;
1388                ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
1389                if (!ret)
1390                        return false;
1391        }
1392
1393        return true;
1394}
1395
1396/**
1397 * i40iw_setup_pbles - copy user pg address to pble's
1398 * @iwdev: iwarp device
1399 * @iwmr: mr pointer for this memory registration
 1400 * @use_pbles: flag to use pbles
1401 */
1402static int i40iw_setup_pbles(struct i40iw_device *iwdev,
1403                             struct i40iw_mr *iwmr,
1404                             bool use_pbles)
1405{
1406        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1407        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1408        struct i40iw_pble_info *pinfo;
1409        u64 *pbl;
1410        enum i40iw_status_code status;
1411        enum i40iw_pble_level level = I40IW_LEVEL_1;
1412
1413        if (use_pbles) {
1414                mutex_lock(&iwdev->pbl_mutex);
1415                status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1416                mutex_unlock(&iwdev->pbl_mutex);
1417                if (status)
1418                        return -ENOMEM;
1419
1420                iwpbl->pbl_allocated = true;
1421                level = palloc->level;
1422                pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
1423                pbl = (u64 *)pinfo->addr;
1424        } else {
1425                pbl = iwmr->pgaddrmem;
1426        }
1427
1428        i40iw_copy_user_pgaddrs(iwmr, pbl, level);
1429
1430        if (use_pbles)
1431                iwmr->pgaddrmem[0] = *pbl;
1432
1433        return 0;
1434}
1435
1436/**
1437 * i40iw_handle_q_mem - handle memory for qp and cq
1438 * @iwdev: iwarp device
1439 * @req: information for q memory management
1440 * @iwpbl: pble struct
1441 * @use_pbles: flag to use pble
1442 */
1443static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
1444                              struct i40iw_mem_reg_req *req,
1445                              struct i40iw_pbl *iwpbl,
1446                              bool use_pbles)
1447{
1448        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1449        struct i40iw_mr *iwmr = iwpbl->iwmr;
1450        struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
1451        struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
1452        struct i40iw_hmc_pble *hmc_p;
1453        u64 *arr = iwmr->pgaddrmem;
1454        u32 pg_size;
1455        int err;
1456        int total;
1457        bool ret = true;
1458
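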
1459        total = req->sq_pages + req->rq_pages + req->cq_pages;
1460        pg_size = iwmr->page_size;
1461
1462        err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1463        if (err)
1464                return err;
1465
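            /* Queue memory must be described by a single level-1 pble chunk. */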
1466        if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
1467                i40iw_free_pble(iwdev->pble_rsrc, palloc);
1468                iwpbl->pbl_allocated = false;
1469                return -ENOMEM;
1470        }
1471
1472        if (use_pbles)
1473                arr = (u64 *)palloc->level1.addr;
1474
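            /* The page following the queue pages (arr[total]) is used as the shadow area. */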
1475        if (iwmr->type == IW_MEMREG_TYPE_QP) {
1476                hmc_p = &qpmr->sq_pbl;
1477                qpmr->shadow = (dma_addr_t)arr[total];
1478
1479                if (use_pbles) {
1480                        ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
1481                        if (ret)
1482                                ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
1483                }
1484
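                    /*
                     * Non-contiguous queues are referenced by pble index;
                     * contiguous queues are programmed by physical address
                     * and the pbles are freed below.
                     */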
1485                if (!ret) {
1486                        hmc_p->idx = palloc->level1.idx;
1487                        hmc_p = &qpmr->rq_pbl;
1488                        hmc_p->idx = palloc->level1.idx + req->sq_pages;
1489                } else {
1490                        hmc_p->addr = arr[0];
1491                        hmc_p = &qpmr->rq_pbl;
1492                        hmc_p->addr = arr[req->sq_pages];
1493                }
1494        } else {                /* CQ */
1495                hmc_p = &cqmr->cq_pbl;
1496                cqmr->shadow = (dma_addr_t)arr[total];
1497
1498                if (use_pbles)
1499                        ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
1500
1501                if (!ret)
1502                        hmc_p->idx = palloc->level1.idx;
1503                else
1504                        hmc_p->addr = arr[0];
1505        }
1506
1507        if (use_pbles && ret) {
1508                i40iw_free_pble(iwdev->pble_rsrc, palloc);
1509                iwpbl->pbl_allocated = false;
1510        }
1511
1512        return err;
1513}
1514
1515/**
1516 * i40iw_hw_alloc_stag - cqp command to allocate stag
1517 * @iwdev: iwarp device
1518 * @iwmr: iwarp mr pointer
1519 */
1520static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
1521{
1522        struct i40iw_allocate_stag_info *info;
1523        struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1524        enum i40iw_status_code status;
1525        int err = 0;
1526        struct i40iw_cqp_request *cqp_request;
1527        struct cqp_commands_info *cqp_info;
1528
1529        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1530        if (!cqp_request)
1531                return -ENOMEM;
1532
1533        cqp_info = &cqp_request->info;
1534        info = &cqp_info->in.u.alloc_stag.info;
1535        memset(info, 0, sizeof(*info));
1536        info->page_size = PAGE_SIZE;
1537        info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1538        info->pd_id = iwpd->sc_pd.pd_id;
1539        info->total_len = iwmr->length;
1540        info->remote_access = true;
1541        cqp_info->cqp_cmd = OP_ALLOC_STAG;
1542        cqp_info->post_sq = 1;
1543        cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
1544        cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
1545
1546        status = i40iw_handle_cqp_op(iwdev, cqp_request);
1547        if (status) {
1548                err = -ENOMEM;
1549                i40iw_pr_err("CQP-OP Alloc Stag fail");
1550        }
1551        return err;
1552}
1553
1554/**
1555 * i40iw_alloc_mr - register stag for fast memory registration
1556 * @pd: ibpd pointer
1557 * @mr_type: memory type for stag registration
1558 * @max_num_sg: max number of pages
1559 * @udata: user data or NULL for kernel objects
1560 */
1561static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1562                                    u32 max_num_sg, struct ib_udata *udata)
1563{
1564        struct i40iw_pd *iwpd = to_iwpd(pd);
1565        struct i40iw_device *iwdev = to_iwdev(pd->device);
1566        struct i40iw_pble_alloc *palloc;
1567        struct i40iw_pbl *iwpbl;
1568        struct i40iw_mr *iwmr;
1569        enum i40iw_status_code status;
1570        u32 stag;
1571        int err_code = -ENOMEM;
1572
1573        iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1574        if (!iwmr)
1575                return ERR_PTR(-ENOMEM);
1576
1577        stag = i40iw_create_stag(iwdev);
1578        if (!stag) {
1579                err_code = -EOVERFLOW;
1580                goto err;
1581        }
1582        stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
1583        iwmr->stag = stag;
1584        iwmr->ibmr.rkey = stag;
1585        iwmr->ibmr.lkey = stag;
1586        iwmr->ibmr.pd = pd;
1587        iwmr->ibmr.device = pd->device;
1588        iwpbl = &iwmr->iwpbl;
1589        iwpbl->iwmr = iwmr;
1590        iwmr->type = IW_MEMREG_TYPE_MEM;
1591        palloc = &iwpbl->pble_alloc;
1592        iwmr->page_cnt = max_num_sg;
1593        mutex_lock(&iwdev->pbl_mutex);
1594        status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1595        mutex_unlock(&iwdev->pbl_mutex);
1596        if (status)
1597                goto err1;
1598
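            /* Fast-register MRs program a level-1 pble chunk directly; anything else is rejected. */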
1599        if (palloc->level != I40IW_LEVEL_1)
1600                goto err2;
1601        err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
1602        if (err_code)
1603                goto err2;
1604        iwpbl->pbl_allocated = true;
1605        i40iw_add_pdusecount(iwpd);
1606        return &iwmr->ibmr;
1607err2:
1608        i40iw_free_pble(iwdev->pble_rsrc, palloc);
1609err1:
1610        i40iw_free_stag(iwdev, stag);
1611err:
1612        kfree(iwmr);
1613        return ERR_PTR(err_code);
1614}
1615
1616/**
1617 * i40iw_set_page - populate pbl list for fmr
1618 * @ibmr: ib mem to access iwarp mr pointer
1619 * @addr: page dma address for pbl list
1620 */
1621static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
1622{
1623        struct i40iw_mr *iwmr = to_iwmr(ibmr);
1624        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1625        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1626        u64 *pbl;
1627
1628        if (unlikely(iwmr->npages == iwmr->page_cnt))
1629                return -ENOMEM;
1630
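            /* Page addresses are stored little-endian in the level-1 pble chunk. */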
1631        pbl = (u64 *)palloc->level1.addr;
1632        pbl[iwmr->npages++] = cpu_to_le64(addr);
1633        return 0;
1634}
1635
1636/**
1637 * i40iw_map_mr_sg - map of sg list for fmr
1638 * @ibmr: ib mem to access iwarp mr pointer
1639 * @sg: scatter gather list for fmr
1640 * @sg_nents: number of sg entries
 * @sg_offset: offset into the first sg entry
1641 */
1642static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
1643                           int sg_nents, unsigned int *sg_offset)
1644{
1645        struct i40iw_mr *iwmr = to_iwmr(ibmr);
1646
1647        iwmr->npages = 0;
1648        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
1649}
1650
1651/**
1652 * i40iw_drain_sq - drain the send queue
1653 * @ibqp: ib qp pointer
1654 */
1655static void i40iw_drain_sq(struct ib_qp *ibqp)
1656{
1657        struct i40iw_qp *iwqp = to_iwqp(ibqp);
1658        struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1659
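            /* Completed by i40iw_poll_cq once the SQ ring has emptied on a QP past RTS. */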
1660        if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
1661                wait_for_completion(&iwqp->sq_drained);
1662}
1663
1664/**
1665 * i40iw_drain_rq - drain the receive queue
1666 * @ibqp: ib qp pointer
1667 */
1668static void i40iw_drain_rq(struct ib_qp *ibqp)
1669{
1670        struct i40iw_qp *iwqp = to_iwqp(ibqp);
1671        struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1672
1673        if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
1674                wait_for_completion(&iwqp->rq_drained);
1675}
1676
1677/**
1678 * i40iw_hwreg_mr - send cqp command for memory registration
1679 * @iwdev: iwarp device
1680 * @iwmr: iwarp mr pointer
1681 * @access: access for MR
1682 */
1683static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
1684                          struct i40iw_mr *iwmr,
1685                          u16 access)
1686{
1687        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1688        struct i40iw_reg_ns_stag_info *stag_info;
1689        struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1690        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1691        enum i40iw_status_code status;
1692        int err = 0;
1693        struct i40iw_cqp_request *cqp_request;
1694        struct cqp_commands_info *cqp_info;
1695
1696        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1697        if (!cqp_request)
1698                return -ENOMEM;
1699
1700        cqp_info = &cqp_request->info;
1701        stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
1702        memset(stag_info, 0, sizeof(*stag_info));
1703        stag_info->va = (void *)(unsigned long)iwpbl->user_base;
1704        stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1705        stag_info->stag_key = (u8)iwmr->stag;
1706        stag_info->total_len = iwmr->length;
1707        stag_info->access_rights = access;
1708        stag_info->pd_id = iwpd->sc_pd.pd_id;
1709        stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
1710        stag_info->page_size = iwmr->page_size;
1711
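            /*
             * pble-backed MRs reference the chunk by level-1 index or by the
             * level-2 root; MRs without pbles register the physical address
             * from pgaddrmem[0] directly.
             */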
1712        if (iwpbl->pbl_allocated) {
1713                if (palloc->level == I40IW_LEVEL_1) {
1714                        stag_info->first_pm_pbl_index = palloc->level1.idx;
1715                        stag_info->chunk_size = 1;
1716                } else {
1717                        stag_info->first_pm_pbl_index = palloc->level2.root.idx;
1718                        stag_info->chunk_size = 3;
1719                }
1720        } else {
1721                stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
1722        }
1723
1724        cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
1725        cqp_info->post_sq = 1;
1726        cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
1727        cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
1728
1729        status = i40iw_handle_cqp_op(iwdev, cqp_request);
1730        if (status) {
1731                err = -ENOMEM;
1732                i40iw_pr_err("CQP-OP MR Reg fail");
1733        }
1734        return err;
1735}
1736
1737/**
1738 * i40iw_reg_user_mr - Register a user memory region
1739 * @pd: ptr of pd
1740 * @start: virtual start address
1741 * @length: length of mr
1742 * @virt: virtual address
1743 * @acc: access of mr
1744 * @udata: user data
1745 */
1746static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
1747                                       u64 start,
1748                                       u64 length,
1749                                       u64 virt,
1750                                       int acc,
1751                                       struct ib_udata *udata)
1752{
1753        struct i40iw_pd *iwpd = to_iwpd(pd);
1754        struct i40iw_device *iwdev = to_iwdev(pd->device);
1755        struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
1756                udata, struct i40iw_ucontext, ibucontext);
1757        struct i40iw_pble_alloc *palloc;
1758        struct i40iw_pbl *iwpbl;
1759        struct i40iw_mr *iwmr;
1760        struct ib_umem *region;
1761        struct i40iw_mem_reg_req req;
1762        u64 pbl_depth = 0;
1763        u32 stag = 0;
1764        u16 access;
1765        u64 region_length;
1766        bool use_pbles = false;
1767        unsigned long flags;
1768        int err = -ENOSYS;
1769        int ret;
1770        int pg_shift;
1771
1772        if (iwdev->closing)
1773                return ERR_PTR(-ENODEV);
1774
1775        if (length > I40IW_MAX_MR_SIZE)
1776                return ERR_PTR(-EINVAL);
1777        region = ib_umem_get(udata, start, length, acc, 0);
1778        if (IS_ERR(region))
1779                return (struct ib_mr *)region;
1780
1781        if (ib_copy_from_udata(&req, udata, sizeof(req))) {
1782                ib_umem_release(region);
1783                return ERR_PTR(-EFAULT);
1784        }
1785
1786        iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1787        if (!iwmr) {
1788                ib_umem_release(region);
1789                return ERR_PTR(-ENOMEM);
1790        }
1791
1792        iwpbl = &iwmr->iwpbl;
1793        iwpbl->iwmr = iwmr;
1794        iwmr->region = region;
1795        iwmr->ibmr.pd = pd;
1796        iwmr->ibmr.device = pd->device;
1797
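            /* Non-queue MRs may use a 2MB page size when ib_umem_find_best_pgsz allows it. */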
1798        iwmr->page_size = PAGE_SIZE;
1799        if (req.reg_type == IW_MEMREG_TYPE_MEM)
1800                iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
1801                                                         virt);
1802
1803        region_length = region->length + (start & (iwmr->page_size - 1));
1804        pg_shift = ffs(iwmr->page_size) - 1;
1805        pbl_depth = region_length >> pg_shift;
1806        pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
1807        iwmr->length = region->length;
1808
1809        iwpbl->user_base = virt;
1810        palloc = &iwpbl->pble_alloc;
1811
1812        iwmr->type = req.reg_type;
1813        iwmr->page_cnt = (u32)pbl_depth;
1814
1815        switch (req.reg_type) {
1816        case IW_MEMREG_TYPE_QP:
1817                use_pbles = ((req.sq_pages + req.rq_pages) > 2);
1818                err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1819                if (err)
1820                        goto error;
1821                spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1822                list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
1823                iwpbl->on_list = true;
1824                spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1825                break;
1826        case IW_MEMREG_TYPE_CQ:
1827                use_pbles = (req.cq_pages > 1);
1828                err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1829                if (err)
1830                        goto error;
1831
1832                spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1833                list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
1834                iwpbl->on_list = true;
1835                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1836                break;
1837        case IW_MEMREG_TYPE_MEM:
1838                use_pbles = (iwmr->page_cnt != 1);
1839                access = I40IW_ACCESS_FLAGS_LOCALREAD;
1840
1841                err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1842                if (err)
1843                        goto error;
1844
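                    /* A physically contiguous MR does not need pbles; free them and register by address. */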
1845                if (use_pbles) {
1846                        ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
1847                        if (ret) {
1848                                i40iw_free_pble(iwdev->pble_rsrc, palloc);
1849                                iwpbl->pbl_allocated = false;
1850                        }
1851                }
1852
1853                access |= i40iw_get_user_access(acc);
1854                stag = i40iw_create_stag(iwdev);
1855                if (!stag) {
1856                        err = -ENOMEM;
1857                        goto error;
1858                }
1859
1860                iwmr->stag = stag;
1861                iwmr->ibmr.rkey = stag;
1862                iwmr->ibmr.lkey = stag;
1863
1864                err = i40iw_hwreg_mr(iwdev, iwmr, access);
1865                if (err) {
1866                        i40iw_free_stag(iwdev, stag);
1867                        goto error;
1868                }
1869
1870                break;
1871        default:
1872                goto error;
1873        }
1874
1875        iwmr->type = req.reg_type;
1876        if (req.reg_type == IW_MEMREG_TYPE_MEM)
1877                i40iw_add_pdusecount(iwpd);
1878        return &iwmr->ibmr;
1879
1880error:
1881        if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
1882                i40iw_free_pble(iwdev->pble_rsrc, palloc);
1883        ib_umem_release(region);
1884        kfree(iwmr);
1885        return ERR_PTR(err);
1886}
1887
1888/**
1889 * i40iw_reg_phys_mr - register kernel physical memory
1890 * @pd: ibpd pointer
1891 * @addr: physical address of memory to register
1892 * @size: size of memory to register
1893 * @acc: Access rights
1894 * @iova_start: start of virtual address for physical buffers
1895 */
1896struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
1897                                u64 addr,
1898                                u64 size,
1899                                int acc,
1900                                u64 *iova_start)
1901{
1902        struct i40iw_pd *iwpd = to_iwpd(pd);
1903        struct i40iw_device *iwdev = to_iwdev(pd->device);
1904        struct i40iw_pbl *iwpbl;
1905        struct i40iw_mr *iwmr;
1906        enum i40iw_status_code status;
1907        u32 stag;
1908        u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1909        int ret;
1910
1911        iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1912        if (!iwmr)
1913                return ERR_PTR(-ENOMEM);
1914        iwmr->ibmr.pd = pd;
1915        iwmr->ibmr.device = pd->device;
1916        iwpbl = &iwmr->iwpbl;
1917        iwpbl->iwmr = iwmr;
1918        iwmr->type = IW_MEMREG_TYPE_MEM;
1919        iwpbl->user_base = *iova_start;
1920        stag = i40iw_create_stag(iwdev);
1921        if (!stag) {
1922                ret = -EOVERFLOW;
1923                goto err;
1924        }
1925        access |= i40iw_get_user_access(acc);
1926        iwmr->stag = stag;
1927        iwmr->ibmr.rkey = stag;
1928        iwmr->ibmr.lkey = stag;
1929        iwmr->page_cnt = 1;
1930        iwmr->pgaddrmem[0]  = addr;
1931        iwmr->length = size;
1932        status = i40iw_hwreg_mr(iwdev, iwmr, access);
1933        if (status) {
1934                i40iw_free_stag(iwdev, stag);
1935                ret = -ENOMEM;
1936                goto err;
1937        }
1938
1939        i40iw_add_pdusecount(iwpd);
1940        return &iwmr->ibmr;
1941 err:
1942        kfree(iwmr);
1943        return ERR_PTR(ret);
1944}
1945
1946/**
1947 * i40iw_get_dma_mr - register physical mem
1948 * @pd: ptr of pd
1949 * @acc: access for memory
1950 */
1951static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
1952{
1953        u64 kva = 0;
1954
1955        return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
1956}
1957
1958/**
1959 * i40iw_del_memlist - Deleting pbl list entries for CQ/QP
1960 * @iwmr: iwmr for IB's user page addresses
1961 * @ucontext: ptr to user context
1962 */
1963static void i40iw_del_memlist(struct i40iw_mr *iwmr,
1964                              struct i40iw_ucontext *ucontext)
1965{
1966        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1967        unsigned long flags;
1968
1969        switch (iwmr->type) {
1970        case IW_MEMREG_TYPE_CQ:
1971                spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1972                if (iwpbl->on_list) {
1973                        iwpbl->on_list = false;
1974                        list_del(&iwpbl->list);
1975                }
1976                spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1977                break;
1978        case IW_MEMREG_TYPE_QP:
1979                spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1980                if (iwpbl->on_list) {
1981                        iwpbl->on_list = false;
1982                        list_del(&iwpbl->list);
1983                }
1984                spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1985                break;
1986        default:
1987                break;
1988        }
1989}
1990
1991/**
1992 * i40iw_dereg_mr - deregister mr
1993 * @ib_mr: mr ptr for dereg
 * @udata: user data
1994 */
1995static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
1996{
1997        struct ib_pd *ibpd = ib_mr->pd;
1998        struct i40iw_pd *iwpd = to_iwpd(ibpd);
1999        struct i40iw_mr *iwmr = to_iwmr(ib_mr);
2000        struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
2001        enum i40iw_status_code status;
2002        struct i40iw_dealloc_stag_info *info;
2003        struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
2004        struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
2005        struct i40iw_cqp_request *cqp_request;
2006        struct cqp_commands_info *cqp_info;
2007        u32 stag_idx;
2008
2009        ib_umem_release(iwmr->region);
2010
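            /*
             * QP/CQ reg types were never given a hardware stag; just unlink
             * them from the ucontext lists and free any pbles.
             */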
2011        if (iwmr->type != IW_MEMREG_TYPE_MEM) {
2012                /* region is released. only test for userness. */
2013                if (iwmr->region) {
2014                        struct i40iw_ucontext *ucontext =
2015                                rdma_udata_to_drv_context(
2016                                        udata,
2017                                        struct i40iw_ucontext,
2018                                        ibucontext);
2019
2020                        i40iw_del_memlist(iwmr, ucontext);
2021                }
2022                if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
2023                        i40iw_free_pble(iwdev->pble_rsrc, palloc);
2024                kfree(iwmr);
2025                return 0;
2026        }
2027
2028        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
2029        if (!cqp_request)
2030                return -ENOMEM;
2031
2032        cqp_info = &cqp_request->info;
2033        info = &cqp_info->in.u.dealloc_stag.info;
2034        memset(info, 0, sizeof(*info));
2035
2036        info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
2037        info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
2038        stag_idx = info->stag_idx;
2039        info->mr = true;
2040        if (iwpbl->pbl_allocated)
2041                info->dealloc_pbl = true;
2042
2043        cqp_info->cqp_cmd = OP_DEALLOC_STAG;
2044        cqp_info->post_sq = 1;
2045        cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
2046        cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2047        status = i40iw_handle_cqp_op(iwdev, cqp_request);
2048        if (status)
2049                i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
2050        i40iw_rem_pdusecount(iwpd, iwdev);
2051        i40iw_free_stag(iwdev, iwmr->stag);
2052        if (iwpbl->pbl_allocated)
2053                i40iw_free_pble(iwdev->pble_rsrc, palloc);
2054        kfree(iwmr);
2055        return 0;
2056}
2057
2058/**
2059 * hw_rev_show
2060 */
2061static ssize_t hw_rev_show(struct device *dev,
2062                           struct device_attribute *attr, char *buf)
2063{
2064        struct i40iw_ib_device *iwibdev =
2065                rdma_device_to_drv_device(dev, struct i40iw_ib_device, ibdev);
2066        u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
2067
2068        return sprintf(buf, "%x\n", hw_rev);
2069}
2070static DEVICE_ATTR_RO(hw_rev);
2071
2072/**
2073 * hca_type_show
2074 */
2075static ssize_t hca_type_show(struct device *dev,
2076                             struct device_attribute *attr, char *buf)
2077{
2078        return sprintf(buf, "I40IW\n");
2079}
2080static DEVICE_ATTR_RO(hca_type);
2081
2082/**
2083 * board_id_show
2084 */
2085static ssize_t board_id_show(struct device *dev,
2086                             struct device_attribute *attr, char *buf)
2087{
2088        return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
2089}
2090static DEVICE_ATTR_RO(board_id);
2091
2092static struct attribute *i40iw_dev_attributes[] = {
2093        &dev_attr_hw_rev.attr,
2094        &dev_attr_hca_type.attr,
2095        &dev_attr_board_id.attr,
2096        NULL
2097};
2098
2099static const struct attribute_group i40iw_attr_group = {
2100        .attrs = i40iw_dev_attributes,
2101};
2102
2103/**
2104 * i40iw_copy_sg_list - copy sg list for qp
2105 * @sg_list: destination sg list
2106 * @sgl: source ib sge list to copy from
2107 * @num_sges: count of sg entries
2108 */
2109static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
2110{
2111        unsigned int i;
2112
2113        for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
2114                sg_list[i].tag_off = sgl[i].addr;
2115                sg_list[i].len = sgl[i].length;
2116                sg_list[i].stag = sgl[i].lkey;
2117        }
2118}
2119
2120/**
2121 * i40iw_post_send - post send wr for kernel application
2122 * @ibqp: qp ptr for wr
2123 * @ib_wr: work request ptr
2124 * @bad_wr: return of bad wr if err
2125 */
2126static int i40iw_post_send(struct ib_qp *ibqp,
2127                           const struct ib_send_wr *ib_wr,
2128                           const struct ib_send_wr **bad_wr)
2129{
2130        struct i40iw_qp *iwqp;
2131        struct i40iw_qp_uk *ukqp;
2132        struct i40iw_post_sq_info info;
2133        enum i40iw_status_code ret;
2134        int err = 0;
2135        unsigned long flags;
2136        bool inv_stag;
2137
2138        iwqp = (struct i40iw_qp *)ibqp;
2139        ukqp = &iwqp->sc_qp.qp_uk;
2140
2141        spin_lock_irqsave(&iwqp->lock, flags);
2142
2143        if (iwqp->flush_issued) {
2144                err = -EINVAL;
2145                goto out;
2146        }
2147
2148        while (ib_wr) {
2149                inv_stag = false;
2150                memset(&info, 0, sizeof(info));
2151                info.wr_id = (u64)(ib_wr->wr_id);
2152                if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2153                        info.signaled = true;
2154                if (ib_wr->send_flags & IB_SEND_FENCE)
2155                        info.read_fence = true;
2156
2157                switch (ib_wr->opcode) {
2158                case IB_WR_SEND:
2159                        /* fall-through */
2160                case IB_WR_SEND_WITH_INV:
2161                        if (ib_wr->opcode == IB_WR_SEND) {
2162                                if (ib_wr->send_flags & IB_SEND_SOLICITED)
2163                                        info.op_type = I40IW_OP_TYPE_SEND_SOL;
2164                                else
2165                                        info.op_type = I40IW_OP_TYPE_SEND;
2166                        } else {
2167                                if (ib_wr->send_flags & IB_SEND_SOLICITED)
2168                                        info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
2169                                else
2170                                        info.op_type = I40IW_OP_TYPE_SEND_INV;
2171                        }
2172
2173                        if (ib_wr->send_flags & IB_SEND_INLINE) {
2174                                info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2175                                info.op.inline_send.len = ib_wr->sg_list[0].length;
2176                                ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
2177                        } else {
2178                                info.op.send.num_sges = ib_wr->num_sge;
2179                                info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
2180                                ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
2181                        }
2182
2183                        if (ret) {
2184                                if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2185                                        err = -ENOMEM;
2186                                else
2187                                        err = -EINVAL;
2188                        }
2189                        break;
2190                case IB_WR_RDMA_WRITE:
2191                        info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
2192
2193                        if (ib_wr->send_flags & IB_SEND_INLINE) {
2194                                info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2195                                info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
2196                                info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2197                                info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2198                                ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
2199                        } else {
2200                                info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
2201                                info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
2202                                info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2203                                info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2204                                ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
2205                        }
2206
2207                        if (ret) {
2208                                if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2209                                        err = -ENOMEM;
2210                                else
2211                                        err = -EINVAL;
2212                        }
2213                        break;
2214                case IB_WR_RDMA_READ_WITH_INV:
2215                        inv_stag = true;
2216                        /* fall-through*/
2217                case IB_WR_RDMA_READ:
2218                        if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
2219                                err = -EINVAL;
2220                                break;
2221                        }
2222                        info.op_type = I40IW_OP_TYPE_RDMA_READ;
2223                        info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2224                        info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2225                        info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
2226                        info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
2227                        info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
2228                        ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
2229                        if (ret) {
2230                                if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2231                                        err = -ENOMEM;
2232                                else
2233                                        err = -EINVAL;
2234                        }
2235                        break;
2236                case IB_WR_LOCAL_INV:
2237                        info.op_type = I40IW_OP_TYPE_INV_STAG;
2238                        info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
2239                        ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
2240                        if (ret)
2241                                err = -ENOMEM;
2242                        break;
2243                case IB_WR_REG_MR:
2244                {
2245                        struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
2246                        int flags = reg_wr(ib_wr)->access;
2247                        struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
2248                        struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
2249                        struct i40iw_fast_reg_stag_info info;
2250
2251                        memset(&info, 0, sizeof(info));
2252                        info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
2253                        info.access_rights |= i40iw_get_user_access(flags);
2254                        info.stag_key = reg_wr(ib_wr)->key & 0xff;
2255                        info.stag_idx = reg_wr(ib_wr)->key >> 8;
2256                        info.page_size = reg_wr(ib_wr)->mr->page_size;
2257                        info.wr_id = ib_wr->wr_id;
2258
2259                        info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
2260                        info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
2261                        info.total_len = iwmr->ibmr.length;
2262                        info.reg_addr_pa = *(u64 *)palloc->level1.addr;
2263                        info.first_pm_pbl_index = palloc->level1.idx;
2264                        info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
2265                        info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
2266
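                            /* FMRs spanning more than I40IW_MIN_PAGES_PER_FMR pages use the pble chunk (chunk_size 1). */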
2267                        if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
2268                                info.chunk_size = 1;
2269
2270                        ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
2271                        if (ret)
2272                                err = -ENOMEM;
2273                        break;
2274                }
2275                default:
2276                        err = -EINVAL;
2277                        i40iw_pr_err("post_send bad opcode = 0x%x\n",
2278                                     ib_wr->opcode);
2279                        break;
2280                }
2281
2282                if (err)
2283                        break;
2284                ib_wr = ib_wr->next;
2285        }
2286
2287out:
2288        if (err)
2289                *bad_wr = ib_wr;
2290        else
2291                ukqp->ops.iw_qp_post_wr(ukqp);
2292        spin_unlock_irqrestore(&iwqp->lock, flags);
2293
2294        return err;
2295}
2296
2297/**
2298 * i40iw_post_recv - post receive wr for kernel application
2299 * @ibqp: ib qp pointer
2300 * @ib_wr: work request for receive
2301 * @bad_wr: bad wr caused an error
2302 */
2303static int i40iw_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,
2304                           const struct ib_recv_wr **bad_wr)
2305{
2306        struct i40iw_qp *iwqp;
2307        struct i40iw_qp_uk *ukqp;
2308        struct i40iw_post_rq_info post_recv;
2309        struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
2310        enum i40iw_status_code ret = 0;
2311        unsigned long flags;
2312        int err = 0;
2313
2314        iwqp = (struct i40iw_qp *)ibqp;
2315        ukqp = &iwqp->sc_qp.qp_uk;
2316
2317        memset(&post_recv, 0, sizeof(post_recv));
2318        spin_lock_irqsave(&iwqp->lock, flags);
2319
2320        if (iwqp->flush_issued) {
2321                err = -EINVAL;
2322                goto out;
2323        }
2324
2325        while (ib_wr) {
2326                post_recv.num_sges = ib_wr->num_sge;
2327                post_recv.wr_id = ib_wr->wr_id;
2328                i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
2329                post_recv.sg_list = sg_list;
2330                ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
2331                if (ret) {
2332                        i40iw_pr_err(" post_recv err %d\n", ret);
2333                        if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2334                                err = -ENOMEM;
2335                        else
2336                                err = -EINVAL;
2337                        *bad_wr = ib_wr;
2338                        goto out;
2339                }
2340                ib_wr = ib_wr->next;
2341        }
2342 out:
2343        spin_unlock_irqrestore(&iwqp->lock, flags);
2344        return err;
2345}
2346
2347/**
2348 * i40iw_poll_cq - poll cq for completion (kernel apps)
2349 * @ibcq: cq to poll
2350 * @num_entries: number of entries to poll
2351 * @entry: array of work completions to fill
2352 */
2353static int i40iw_poll_cq(struct ib_cq *ibcq,
2354                         int num_entries,
2355                         struct ib_wc *entry)
2356{
2357        struct i40iw_cq *iwcq;
2358        int cqe_count = 0;
2359        struct i40iw_cq_poll_info cq_poll_info;
2360        enum i40iw_status_code ret;
2361        struct i40iw_cq_uk *ukcq;
2362        struct i40iw_sc_qp *qp;
2363        struct i40iw_qp *iwqp;
2364        unsigned long flags;
2365
2366        iwcq = (struct i40iw_cq *)ibcq;
2367        ukcq = &iwcq->sc_cq.cq_uk;
2368
2369        spin_lock_irqsave(&iwcq->lock, flags);
2370        while (cqe_count < num_entries) {
2371                ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
2372                if (ret == I40IW_ERR_QUEUE_EMPTY) {
2373                        break;
2374                } else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
2375                        continue;
2376                } else if (ret) {
2377                        if (!cqe_count)
2378                                cqe_count = -1;
2379                        break;
2380                }
2381                entry->wc_flags = 0;
2382                entry->wr_id = cq_poll_info.wr_id;
2383                if (cq_poll_info.error) {
2384                        entry->status = IB_WC_WR_FLUSH_ERR;
2385                        entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
2386                } else {
2387                        entry->status = IB_WC_SUCCESS;
2388                }
2389
2390                switch (cq_poll_info.op_type) {
2391                case I40IW_OP_TYPE_RDMA_WRITE:
2392                        entry->opcode = IB_WC_RDMA_WRITE;
2393                        break;
2394                case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
2395                case I40IW_OP_TYPE_RDMA_READ:
2396                        entry->opcode = IB_WC_RDMA_READ;
2397                        break;
2398                case I40IW_OP_TYPE_SEND_SOL:
2399                case I40IW_OP_TYPE_SEND_SOL_INV:
2400                case I40IW_OP_TYPE_SEND_INV:
2401                case I40IW_OP_TYPE_SEND:
2402                        entry->opcode = IB_WC_SEND;
2403                        break;
2404                case I40IW_OP_TYPE_REC:
2405                        entry->opcode = IB_WC_RECV;
2406                        break;
2407                default:
2408                        entry->opcode = IB_WC_RECV;
2409                        break;
2410                }
2411
2412                entry->ex.imm_data = 0;
2413                qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
2414                entry->qp = (struct ib_qp *)qp->back_qp;
2415                entry->src_qp = cq_poll_info.qp_id;
2416                iwqp = (struct i40iw_qp *)qp->back_qp;
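                    /* Complete drain waiters once a QP past RTS has emptied its rings. */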
2417                if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
2418                        if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
2419                                complete(&iwqp->sq_drained);
2420                        if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
2421                                complete(&iwqp->rq_drained);
2422                }
2423                entry->byte_len = cq_poll_info.bytes_xfered;
2424                entry++;
2425                cqe_count++;
2426        }
2427        spin_unlock_irqrestore(&iwcq->lock, flags);
2428        return cqe_count;
2429}
2430
2431/**
2432 * i40iw_req_notify_cq - arm cq kernel application
2433 * @ibcq: cq to arm
2434 * @notify_flags: notification flags
2435 */
2436static int i40iw_req_notify_cq(struct ib_cq *ibcq,
2437                               enum ib_cq_notify_flags notify_flags)
2438{
2439        struct i40iw_cq *iwcq;
2440        struct i40iw_cq_uk *ukcq;
2441        unsigned long flags;
2442        enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
2443
2444        iwcq = (struct i40iw_cq *)ibcq;
2445        ukcq = &iwcq->sc_cq.cq_uk;
2446        if (notify_flags == IB_CQ_SOLICITED)
2447                cq_notify = IW_CQ_COMPL_SOLICITED;
2448        spin_lock_irqsave(&iwcq->lock, flags);
2449        ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
2450        spin_unlock_irqrestore(&iwcq->lock, flags);
2451        return 0;
2452}
2453
2454/**
2455 * i40iw_port_immutable - return port's immutable data
2456 * @ibdev: ib dev struct
2457 * @port_num: port number
2458 * @immutable: immutable data for the port return
2459 */
2460static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
2461                                struct ib_port_immutable *immutable)
2462{
2463        struct ib_port_attr attr;
2464        int err;
2465
2466        immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
2467
2468        err = ib_query_port(ibdev, port_num, &attr);
2469
2470        if (err)
2471                return err;
2472
2473        immutable->pkey_tbl_len = attr.pkey_tbl_len;
2474        immutable->gid_tbl_len = attr.gid_tbl_len;
2475
2476        return 0;
2477}
2478
2479static const char * const i40iw_hw_stat_names[] = {
2480        // 32bit names
2481        [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
2482        [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
2483        [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
2484        [I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
2485        [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
2486        [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
2487        [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
2488        [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
2489        [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
2490        // 64bit names
2491        [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2492                "ip4InOctets",
2493        [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2494                "ip4InPkts",
2495        [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2496                "ip4InReasmRqd",
2497        [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2498                "ip4InMcastPkts",
2499        [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2500                "ip4OutOctets",
2501        [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2502                "ip4OutPkts",
2503        [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2504                "ip4OutSegRqd",
2505        [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2506                "ip4OutMcastPkts",
2507        [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2508                "ip6InOctets",
2509        [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2510                "ip6InPkts",
2511        [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2512                "ip6InReasmRqd",
2513        [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2514                "ip6InMcastPkts",
2515        [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2516                "ip6OutOctets",
2517        [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2518                "ip6OutPkts",
2519        [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2520                "ip6OutSegRqd",
2521        [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2522                "ip6OutMcastPkts",
2523        [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
2524                "tcpInSegs",
2525        [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
2526                "tcpOutSegs",
2527        [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
2528                "iwInRdmaReads",
2529        [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
2530                "iwInRdmaSends",
2531        [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
2532                "iwInRdmaWrites",
2533        [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
2534                "iwOutRdmaReads",
2535        [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
2536                "iwOutRdmaSends",
2537        [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
2538                "iwOutRdmaWrites",
2539        [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
2540                "iwRdmaBnd",
2541        [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
2542                "iwRdmaInv"
2543};
2544
2545static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
2546{
2547        u32 firmware_version = I40IW_FW_VERSION;
2548
2549        snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version,
2550                 (firmware_version & 0x000000ff));
2551}
2552
2553/**
2554 * i40iw_alloc_hw_stats - Allocate a hw stats structure
2555 * @ibdev: device pointer from stack
2556 * @port_num: port number
2557 */
2558static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
2559                                                  u8 port_num)
2560{
2561        struct i40iw_device *iwdev = to_iwdev(ibdev);
2562        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2563        int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
2564                I40IW_HW_STAT_INDEX_MAX_64;
2565        unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
2566
2567        BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
2568                     (I40IW_HW_STAT_INDEX_MAX_32 +
2569                      I40IW_HW_STAT_INDEX_MAX_64));
2570
2571        /*
2572         * PFs get the default update lifespan, but VFs only update once
2573         * per second
2574         */
2575        if (!dev->is_pf)
2576                lifespan = 1000;
2577        return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
2578                                          lifespan);
2579}
2580
2581/**
2582 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
2583 * @ibdev: device pointer from stack
2584 * @stats: stats pointer from stack
2585 * @port_num: port number
2586 * @index: which hw counter the stack is requesting we update
2587 */
2588static int i40iw_get_hw_stats(struct ib_device *ibdev,
2589                              struct rdma_hw_stats *stats,
2590                              u8 port_num, int index)
2591{
2592        struct i40iw_device *iwdev = to_iwdev(ibdev);
2593        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2594        struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
2595        struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
2596
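            /* PFs read the counters directly; VFs fetch them over the virtual channel. */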
2597        if (dev->is_pf) {
2598                i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
2599        } else {
2600                if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
2601                        return -ENOSYS;
2602        }
2603
2604        memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
2605
2606        return stats->num_counters;
2607}
2608
2609/**
2610 * i40iw_query_gid - Query port GID
2611 * @ibdev: device pointer from stack
2612 * @port: port number
2613 * @index: Entry index
2614 * @gid: Global ID
2615 */
2616static int i40iw_query_gid(struct ib_device *ibdev,
2617                           u8 port,
2618                           int index,
2619                           union ib_gid *gid)
2620{
2621        struct i40iw_device *iwdev = to_iwdev(ibdev);
2622
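            /* The GID is derived from the netdev MAC address. */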
2623        memset(gid->raw, 0, sizeof(gid->raw));
2624        ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
2625        return 0;
2626}
2627
2628/**
2629 * i40iw_query_pkey - Query partition key
2630 * @ibdev: device pointer from stack
2631 * @port: port number
2632 * @index: index of pkey
2633 * @pkey: pointer to store the pkey
2634 */
2635static int i40iw_query_pkey(struct ib_device *ibdev,
2636                            u8 port,
2637                            u16 index,
2638                            u16 *pkey)
2639{
2640        *pkey = 0;
2641        return 0;
2642}
2643
2644static const struct ib_device_ops i40iw_dev_ops = {
2645        .owner = THIS_MODULE,
2646        .driver_id = RDMA_DRIVER_I40IW,
2647        /* NOTE: Older kernels wrongly use 0 for the uverbs_abi_ver */
2648        .uverbs_abi_ver = I40IW_ABI_VER,
2649
2650        .alloc_hw_stats = i40iw_alloc_hw_stats,
2651        .alloc_mr = i40iw_alloc_mr,
2652        .alloc_pd = i40iw_alloc_pd,
2653        .alloc_ucontext = i40iw_alloc_ucontext,
2654        .create_cq = i40iw_create_cq,
2655        .create_qp = i40iw_create_qp,
2656        .dealloc_pd = i40iw_dealloc_pd,
2657        .dealloc_ucontext = i40iw_dealloc_ucontext,
2658        .dereg_mr = i40iw_dereg_mr,
2659        .destroy_cq = i40iw_destroy_cq,
2660        .destroy_qp = i40iw_destroy_qp,
2661        .drain_rq = i40iw_drain_rq,
2662        .drain_sq = i40iw_drain_sq,
2663        .get_dev_fw_str = i40iw_get_dev_fw_str,
2664        .get_dma_mr = i40iw_get_dma_mr,
2665        .get_hw_stats = i40iw_get_hw_stats,
2666        .get_port_immutable = i40iw_port_immutable,
2667        .iw_accept = i40iw_accept,
2668        .iw_add_ref = i40iw_add_ref,
2669        .iw_connect = i40iw_connect,
2670        .iw_create_listen = i40iw_create_listen,
2671        .iw_destroy_listen = i40iw_destroy_listen,
2672        .iw_get_qp = i40iw_get_qp,
2673        .iw_reject = i40iw_reject,
2674        .iw_rem_ref = i40iw_rem_ref,
2675        .map_mr_sg = i40iw_map_mr_sg,
2676        .mmap = i40iw_mmap,
2677        .modify_qp = i40iw_modify_qp,
2678        .poll_cq = i40iw_poll_cq,
2679        .post_recv = i40iw_post_recv,
2680        .post_send = i40iw_post_send,
2681        .query_device = i40iw_query_device,
2682        .query_gid = i40iw_query_gid,
2683        .query_pkey = i40iw_query_pkey,
2684        .query_port = i40iw_query_port,
2685        .query_qp = i40iw_query_qp,
2686        .reg_user_mr = i40iw_reg_user_mr,
2687        .req_notify_cq = i40iw_req_notify_cq,
2688        INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),
2689        INIT_RDMA_OBJ_SIZE(ib_cq, i40iw_cq, ibcq),
2690        INIT_RDMA_OBJ_SIZE(ib_ucontext, i40iw_ucontext, ibucontext),
2691};
2692
2693/**
2694 * i40iw_init_rdma_device - initialization of iwarp device
2695 * @iwdev: iwarp device
2696 */
2697static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
2698{
2699        struct i40iw_ib_device *iwibdev;
2700        struct net_device *netdev = iwdev->netdev;
2701        struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;
2702
2703        iwibdev = ib_alloc_device(i40iw_ib_device, ibdev);
2704        if (!iwibdev) {
2705                i40iw_pr_err("iwdev == NULL\n");
2706                return NULL;
2707        }
2708        iwdev->iwibdev = iwibdev;
2709        iwibdev->iwdev = iwdev;
2710
2711        iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
2712        ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);
2713
2714        iwibdev->ibdev.uverbs_cmd_mask =
2715            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2716            (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2717            (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2718            (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2719            (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2720            (1ull << IB_USER_VERBS_CMD_REG_MR) |
2721            (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2722            (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2723            (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2724            (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2725            (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2726            (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2727            (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2728            (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2729            (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2730            (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2731            (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2732            (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2733            (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2734            (1ull << IB_USER_VERBS_CMD_POST_SEND);
2735        iwibdev->ibdev.phys_port_cnt = 1;
2736        iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
2737        iwibdev->ibdev.dev.parent = &pcidev->dev;
2738        memcpy(iwibdev->ibdev.iw_ifname, netdev->name,
2739               sizeof(iwibdev->ibdev.iw_ifname));
2740        ib_set_device_ops(&iwibdev->ibdev, &i40iw_dev_ops);
2741
2742        return iwibdev;
2743}
2744
2745/**
2746 * i40iw_port_ibevent - indicate port event
2747 * @iwdev: iwarp device
2748 */
2749void i40iw_port_ibevent(struct i40iw_device *iwdev)
2750{
2751        struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
2752        struct ib_event event;
2753
2754        event.device = &iwibdev->ibdev;
2755        event.element.port_num = 1;
2756        event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2757        ib_dispatch_event(&event);
2758}
2759
2760/**
2761 * i40iw_destroy_rdma_device - destroy rdma device and free resources
2762 * @iwibdev: IB device ptr
2763 */
2764void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
2765{
2766        ib_unregister_device(&iwibdev->ibdev);
2767        wait_event_timeout(iwibdev->iwdev->close_wq,
2768                           !atomic64_read(&iwibdev->iwdev->use_count),
2769                           I40IW_EVENT_TIMEOUT);
2770        ib_dealloc_device(&iwibdev->ibdev);
2771}
2772
2773/**
2774 * i40iw_register_rdma_device - register iwarp device to IB
2775 * @iwdev: iwarp device
2776 */
2777int i40iw_register_rdma_device(struct i40iw_device *iwdev)
2778{
2779        int ret;
2780        struct i40iw_ib_device *iwibdev;
2781
2782        iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
2783        if (!iwdev->iwibdev)
2784                return -ENOMEM;
2785        iwibdev = iwdev->iwibdev;
2786        rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
2787        ret = ib_register_device(&iwibdev->ibdev, "i40iw%d");
2788        if (ret)
2789                goto error;
2790
2791        return 0;
2792error:
2793        ib_dealloc_device(&iwdev->iwibdev->ibdev);
2794        return ret;
2795}
2796