/* linux/drivers/infiniband/hw/usnic/usnic_ib_verbs.c */
   1/*
   2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
   3 *
   4 * This program is free software; you may redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; version 2 of the License.
   7 *
   8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
   9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  15 * SOFTWARE.
  16 *
  17 */
  18#include <linux/module.h>
  19#include <linux/init.h>
  20#include <linux/slab.h>
  21#include <linux/errno.h>
  22
  23#include <rdma/ib_user_verbs.h>
  24#include <rdma/ib_addr.h>
  25
  26#include "usnic_abi.h"
  27#include "usnic_ib.h"
  28#include "usnic_common_util.h"
  29#include "usnic_ib_qp_grp.h"
  30#include "usnic_fwd.h"
  31#include "usnic_log.h"
  32#include "usnic_uiom.h"
  33#include "usnic_transport.h"
  34
  35#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
  36
/*
 * Collapse the firmware version string into the u64 that
 * ib_device_attr.fw_ver reports.
 *
 * NOTE(review): only the FIRST character of @fw_ver_str is stored (the
 * pointer is dereferenced once, not parsed), so nearly all version
 * information is discarded.  Looks like a placeholder encoding --
 * confirm whether a real conversion is intended.
 */
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
        *fw_ver = (u64) *fw_ver_str;
}
  41
/*
 * Build the usnic_ib_create_qp_resp that userspace needs to drive the
 * QP directly: the VF index, BAR0 bus address and length, the vNIC
 * indices of every RQ/WQ/CQ resource owned by @qp_grp, and the
 * transport type of the group's default (first) flow.  The response is
 * copied out through @udata at the end.
 *
 * Returns 0 on success; -EFAULT when the pdev or BAR0 lookup fails,
 * the chunk-lookup error (or -ENOMEM) when a resource chunk is
 * missing, or the ib_copy_to_udata() error.
 */
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
                                        struct ib_udata *udata)
{
        struct usnic_ib_dev *us_ibdev;
        struct usnic_ib_create_qp_resp resp;
        struct pci_dev *pdev;
        struct vnic_dev_bar *bar;
        struct usnic_vnic_res_chunk *chunk;
        struct usnic_ib_qp_grp_flow *default_flow;
        int i, err;

        memset(&resp, 0, sizeof(resp));

        us_ibdev = qp_grp->vf->pf;
        pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
        if (!pdev) {
                usnic_err("Failed to get pdev of qp_grp %d\n",
                                qp_grp->grp_id);
                return -EFAULT;
        }

        /* Userspace mmap()s BAR0 of this VF to talk to the device directly */
        bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
        if (!bar) {
                usnic_err("Failed to get bar0 of qp_grp %d vf %s",
                                qp_grp->grp_id, pci_name(pdev));
                return -EFAULT;
        }

        resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
        resp.bar_bus_addr = bar->bus_addr;
        resp.bar_len = bar->len;

        /* Receive queues allocated for this group */
        chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
        if (IS_ERR_OR_NULL(chunk)) {
                usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
                        usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
                        qp_grp->grp_id,
                        PTR_ERR(chunk));
                return chunk ? PTR_ERR(chunk) : -ENOMEM;
        }

        WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
        resp.rq_cnt = chunk->cnt;
        for (i = 0; i < chunk->cnt; i++)
                resp.rq_idx[i] = chunk->res[i]->vnic_idx;

        /* Work (send) queues allocated for this group */
        chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
        if (IS_ERR_OR_NULL(chunk)) {
                usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
                        usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
                        qp_grp->grp_id,
                        PTR_ERR(chunk));
                return chunk ? PTR_ERR(chunk) : -ENOMEM;
        }

        WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
        resp.wq_cnt = chunk->cnt;
        for (i = 0; i < chunk->cnt; i++)
                resp.wq_idx[i] = chunk->res[i]->vnic_idx;

        /* Completion queues allocated for this group */
        chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
        if (IS_ERR_OR_NULL(chunk)) {
                usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
                        usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
                        qp_grp->grp_id,
                        PTR_ERR(chunk));
                return chunk ? PTR_ERR(chunk) : -ENOMEM;
        }

        WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
        resp.cq_cnt = chunk->cnt;
        for (i = 0; i < chunk->cnt; i++)
                resp.cq_idx[i] = chunk->res[i]->vnic_idx;

        /* The first flow on the group is the default transport flow */
        default_flow = list_first_entry(&qp_grp->flows_lst,
                                        struct usnic_ib_qp_grp_flow, link);
        resp.transport = default_flow->trans_type;

        err = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (err) {
                usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
                return err;
        }

        return 0;
}
 128
/*
 * Find a VF with enough free vNIC resources to satisfy @res_spec and
 * create a qp_grp on it.
 *
 * When usnic_ib_share_vf is set, VFs already in use by @pd are tried
 * first so several QPs can share one VF; otherwise (or when none of
 * those has room) a completely unused VF is searched.
 *
 * Locking: caller must hold us_ibdev->usdev_lock (BUG_ON enforced).
 * The chosen vf->lock is taken when the VF is found and deliberately
 * kept held -- via the loop break -- until usnic_ib_qp_grp_create()
 * returns, so the free resources cannot be claimed by another thread
 * in between.
 *
 * Returns the new qp_grp on success, NULL when the device has no VFs
 * at all, or an ERR_PTR on failure.
 */
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
                                struct usnic_ib_pd *pd,
                                struct usnic_transport_spec *trans_spec,
                                struct usnic_vnic_res_spec *res_spec)
{
        struct usnic_ib_vf *vf;
        struct usnic_vnic *vnic;
        struct usnic_ib_qp_grp *qp_grp;
        struct device *dev, **dev_list;
        int i, found = 0;

        BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

        if (list_empty(&us_ibdev->vf_dev_list)) {
                usnic_info("No vfs to allocate\n");
                return NULL;
        }

        if (usnic_ib_share_vf) {
                /* Try to find resouces on a used vf which is in pd */
                /* NOTE(review): dev_list is used without a NULL/error
                 * check -- confirm usnic_uiom_get_dev_list() cannot fail
                 * here. */
                dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
                for (i = 0; dev_list[i]; i++) {
                        dev = dev_list[i];
                        vf = pci_get_drvdata(to_pci_dev(dev));
                        spin_lock(&vf->lock);
                        vnic = vf->vnic;
                        /* usnic_vnic_check_room() == 0 means it fits */
                        if (!usnic_vnic_check_room(vnic, res_spec)) {
                                usnic_dbg("Found used vnic %s from %s\n",
                                                us_ibdev->ib_dev.name,
                                                pci_name(usnic_vnic_get_pdev(
                                                                        vnic)));
                                /* break with vf->lock still held */
                                found = 1;
                                break;
                        }
                        spin_unlock(&vf->lock);

                }
                usnic_uiom_free_dev_list(dev_list);
        }

        if (!found) {
                /* Try to find resources on an unused vf */
                list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
                        spin_lock(&vf->lock);
                        vnic = vf->vnic;
                        if (vf->qp_grp_ref_cnt == 0 &&
                                usnic_vnic_check_room(vnic, res_spec) == 0) {
                                /* break with vf->lock still held */
                                found = 1;
                                break;
                        }
                        spin_unlock(&vf->lock);
                }
        }

        if (!found) {
                usnic_info("No free qp grp found on %s\n",
                                us_ibdev->ib_dev.name);
                return ERR_PTR(-ENOMEM);
        }

        /* vf->lock is still held here from the successful search above */
        qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
                                                trans_spec);
        spin_unlock(&vf->lock);
        if (IS_ERR_OR_NULL(qp_grp)) {
                usnic_err("Failed to allocate qp_grp\n");
                return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
        }

        return qp_grp;
}
 200
/*
 * Tear down a qp_grp and release its vNIC resources back to the owning
 * VF.  The group must already be in IB_QPS_RESET (WARN_ON otherwise);
 * the VF's spinlock is held around the actual destroy.
 */
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
        struct usnic_ib_vf *vf = qp_grp->vf;

        WARN_ON(qp_grp->state != IB_QPS_RESET);

        spin_lock(&vf->lock);
        usnic_ib_qp_grp_destroy(qp_grp);
        spin_unlock(&vf->lock);
}
 211
 212static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
 213                                        u8 *active_width)
 214{
 215        if (speed <= 10000) {
 216                *active_width = IB_WIDTH_1X;
 217                *active_speed = IB_SPEED_FDR10;
 218        } else if (speed <= 20000) {
 219                *active_width = IB_WIDTH_4X;
 220                *active_speed = IB_SPEED_DDR;
 221        } else if (speed <= 30000) {
 222                *active_width = IB_WIDTH_4X;
 223                *active_speed = IB_SPEED_QDR;
 224        } else if (speed <= 40000) {
 225                *active_width = IB_WIDTH_4X;
 226                *active_speed = IB_SPEED_FDR10;
 227        } else {
 228                *active_width = IB_WIDTH_4X;
 229                *active_speed = IB_SPEED_EDR;
 230        }
 231}
 232
 233static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
 234{
 235        if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
 236                        cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
 237                return -EINVAL;
 238
 239        return 0;
 240}
 241
 242/* Start of ib callback functions */
 243
/* usNIC ports are always Ethernet, regardless of port number. */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
                                                u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}
 249
/*
 * ib_device query callback: fill @props from the underlying netdev
 * (ethtool drvinfo/settings), static usNIC limits, and the per-VF
 * resource counts.  Runs under usdev_lock so the device state is
 * consistent while we read it.
 *
 * NOTE(review): ->get_drvinfo and ->get_settings are called
 * unconditionally -- assumes the netdev's ethtool_ops always provides
 * both; confirm at registration time.
 */
int usnic_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
{
        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
        union ib_gid gid;
        struct ethtool_drvinfo info;
        struct ethtool_cmd cmd;
        int qp_per_vf;

        usnic_dbg("\n");
        mutex_lock(&us_ibdev->usdev_lock);
        us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
        us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
        memset(props, 0, sizeof(*props));
        /* sys_image_guid is derived from the device's MAC + IP GID */
        usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
                        &gid.raw[0]);
        memcpy(&props->sys_image_guid, &gid.global.interface_id,
                sizeof(gid.global.interface_id));
        usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
        props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
        props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
        props->vendor_id = PCI_VENDOR_ID_CISCO;
        props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
        props->hw_ver = us_ibdev->pdev->subsystem_device;
        /* A QP needs one WQ and one RQ, so a VF supports as many QPs as
         * the larger of its WQ/RQ counts allows. */
        qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
                        us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
        props->max_qp = qp_per_vf *
                atomic_read(&us_ibdev->vf_cnt.refcount);
        props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
                IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
                atomic_read(&us_ibdev->vf_cnt.refcount);
        props->max_pd = USNIC_UIOM_MAX_PD_CNT;
        props->max_mr = USNIC_UIOM_MAX_MR_CNT;
        props->local_ca_ack_delay = 0;
        props->max_pkeys = 0;
        /* usNIC has no atomics, SRQs, multicast or FMR support */
        props->atomic_cap = IB_ATOMIC_NONE;
        props->masked_atomic_cap = props->atomic_cap;
        props->max_qp_rd_atom = 0;
        props->max_qp_init_rd_atom = 0;
        props->max_res_rd_atom = 0;
        props->max_srq = 0;
        props->max_srq_wr = 0;
        props->max_srq_sge = 0;
        props->max_fast_reg_page_list_len = 0;
        props->max_mcast_grp = 0;
        props->max_mcast_qp_attach = 0;
        props->max_total_mcast_qp_attach = 0;
        props->max_map_per_fmr = 0;
        /* Owned by Userspace
         * max_qp_wr, max_sge, max_sge_rd, max_cqe */
        mutex_unlock(&us_ibdev->usdev_lock);

        return 0;
}
 305
/*
 * ib_device query_port callback: report port attributes derived from
 * the underlying Ethernet link.  Port state is ACTIVE only when the
 * link is up AND an IP address is configured; link up without an
 * address is reported as INIT.
 */
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
                                struct ib_port_attr *props)
{
        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
        struct ethtool_cmd cmd;

        usnic_dbg("\n");

        mutex_lock(&us_ibdev->usdev_lock);
        us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
        memset(props, 0, sizeof(*props));

        /* No subnet manager concepts on an Ethernet-only device */
        props->lid = 0;
        props->lmc = 1;
        props->sm_lid = 0;
        props->sm_sl = 0;

        /* phys_state values appear to follow the IB PortPhysicalState
         * encoding (3=Disabled, 4=PortConfigurationTraining, 5=LinkUp)
         * -- NOTE(review): confirm against the IB spec. */
        if (!us_ibdev->ufdev->link_up) {
                props->state = IB_PORT_DOWN;
                props->phys_state = 3;
        } else if (!us_ibdev->ufdev->inaddr) {
                props->state = IB_PORT_INIT;
                props->phys_state = 4;
        } else {
                props->state = IB_PORT_ACTIVE;
                props->phys_state = 5;
        }

        props->port_cap_flags = 0;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = 0;
        props->qkey_viol_cntr = 0;
        eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
                                &props->active_width);
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
        /* Userspace will adjust for hdrs */
        props->max_msg_sz = us_ibdev->ufdev->mtu;
        props->max_vl_num = 1;
        mutex_unlock(&us_ibdev->usdev_lock);

        return 0;
}
 350
/*
 * ib_device query_qp callback.  Only UD QPs exist on usNIC, so the
 * only attributes reported are the current/target state (mirrored
 * from the qp_grp) and a zero qkey; everything else is left zeroed.
 *
 * Returns 0 on success, -EINVAL for an unexpected QP type.
 */
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
                                int qp_attr_mask,
                                struct ib_qp_init_attr *qp_init_attr)
{
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_vf *vf;
        int err;

        usnic_dbg("\n");

        memset(qp_attr, 0, sizeof(*qp_attr));
        memset(qp_init_attr, 0, sizeof(*qp_init_attr));

        qp_grp = to_uqp_grp(qp);
        vf = qp_grp->vf;
        /* usdev_lock keeps qp_grp->state stable while we copy it */
        mutex_lock(&vf->pf->usdev_lock);
        usnic_dbg("\n");
        qp_attr->qp_state = qp_grp->state;
        qp_attr->cur_qp_state = qp_grp->state;

        switch (qp_grp->ibqp.qp_type) {
        case IB_QPT_UD:
                qp_attr->qkey = 0;
                break;
        default:
                usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
                err = -EINVAL;
                goto err_out;
        }

        mutex_unlock(&vf->pf->usdev_lock);
        return 0;

err_out:
        mutex_unlock(&vf->pf->usdev_lock);
        return err;
}
 388
/*
 * ib_device query_gid callback: the single usNIC GID is synthesized
 * from the device's MAC address and IP address.
 *
 * NOTE(review): usnic_ib_query_port reports gid_tbl_len == 1, yet
 * index 1 is accepted here ("index > 1").  The returned GID is
 * index-independent so this is harmless, but confirm whether the
 * bound should be "index > 0".
 */
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                                union ib_gid *gid)
{

        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
        usnic_dbg("\n");

        if (index > 1)
                return -EINVAL;

        mutex_lock(&us_ibdev->usdev_lock);
        memset(&(gid->raw[0]), 0, sizeof(gid->raw));
        usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
                        &gid->raw[0]);
        mutex_unlock(&us_ibdev->usdev_lock);

        return 0;
}
 407
 408int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 409                                u16 *pkey)
 410{
 411        if (index > 1)
 412                return -EINVAL;
 413
 414        *pkey = 0xffff;
 415        return 0;
 416}
 417
 418struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
 419                                        struct ib_ucontext *context,
 420                                        struct ib_udata *udata)
 421{
 422        struct usnic_ib_pd *pd;
 423        void *umem_pd;
 424
 425        usnic_dbg("\n");
 426
 427        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 428        if (!pd)
 429                return ERR_PTR(-ENOMEM);
 430
 431        umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
 432        if (IS_ERR_OR_NULL(umem_pd)) {
 433                kfree(pd);
 434                return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
 435        }
 436
 437        usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
 438                        pd, context, ibdev->name);
 439        return &pd->ibpd;
 440}
 441
/*
 * Free a protection domain: release the backing uiom PD, then the
 * wrapper itself.  Always succeeds.
 */
int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
        usnic_info("freeing domain 0x%p\n", pd);

        usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
        kfree(pd);
        return 0;
}
 450
/*
 * ib_device create_qp callback.  Validates the user's transport spec,
 * reserves vNIC resources on a VF (via find_free_vf_and_create_qp_grp),
 * and copies the resource layout back to userspace so it can drive the
 * QP directly.  Only IB_QPT_UD QPs are supported.
 *
 * Returns the new ib_qp or an ERR_PTR on failure.
 */
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
                                        struct ib_qp_init_attr *init_attr,
                                        struct ib_udata *udata)
{
        int err;
        struct usnic_ib_dev *us_ibdev;
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_ucontext *ucontext;
        int cq_cnt;
        struct usnic_vnic_res_spec res_spec;
        struct usnic_ib_create_qp_cmd cmd;
        struct usnic_transport_spec trans_spec;

        usnic_dbg("\n");

        ucontext = to_uucontext(pd->uobject->context);
        us_ibdev = to_usdev(pd->device);

        err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
        if (err) {
                usnic_err("%s: cannot copy udata for create_qp\n",
                                us_ibdev->ib_dev.name);
                return ERR_PTR(-EINVAL);
        }

        err = create_qp_validate_user_data(cmd);
        if (err) {
                usnic_err("%s: Failed to validate user data\n",
                                us_ibdev->ib_dev.name);
                return ERR_PTR(-EINVAL);
        }

        if (init_attr->qp_type != IB_QPT_UD) {
                usnic_err("%s asked to make a non-UD QP: %d\n",
                                us_ibdev->ib_dev.name, init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }

        trans_spec = cmd.spec;
        mutex_lock(&us_ibdev->usdev_lock);
        /* One CQ resource if send/recv share a CQ, two otherwise */
        cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
        res_spec = min_transport_spec[trans_spec.trans_type];
        usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
        qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
                                                &trans_spec,
                                                &res_spec);
        if (IS_ERR_OR_NULL(qp_grp)) {
                err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
                goto out_release_mutex;
        }

        err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
        if (err) {
                /* NOTE(review): the specific error from
                 * fill_create_qp_resp is discarded and replaced with
                 * -EBUSY -- confirm this is intentional. */
                err = -EBUSY;
                goto out_release_qp_grp;
        }

        qp_grp->ctx = ucontext;
        list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
        usnic_ib_log_vf(qp_grp->vf);
        mutex_unlock(&us_ibdev->usdev_lock);
        return &qp_grp->ibqp;

out_release_qp_grp:
        qp_grp_destroy(qp_grp);
out_release_mutex:
        mutex_unlock(&us_ibdev->usdev_lock);
        return ERR_PTR(err);
}
 520
/*
 * ib_device destroy_qp callback.  Moves the qp_grp to RESET (logging
 * but otherwise ignoring failure, since teardown must proceed),
 * unlinks it from its ucontext, and destroys it.  Always returns 0.
 */
int usnic_ib_destroy_qp(struct ib_qp *qp)
{
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_vf *vf;

        usnic_dbg("\n");

        qp_grp = to_uqp_grp(qp);
        vf = qp_grp->vf;
        mutex_lock(&vf->pf->usdev_lock);
        if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
                usnic_err("Failed to move qp grp %u to reset\n",
                                qp_grp->grp_id);
        }

        list_del(&qp_grp->link);
        qp_grp_destroy(qp_grp);
        mutex_unlock(&vf->pf->usdev_lock);

        return 0;
}
 542
 543int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 544                                int attr_mask, struct ib_udata *udata)
 545{
 546        struct usnic_ib_qp_grp *qp_grp;
 547        int status;
 548        usnic_dbg("\n");
 549
 550        qp_grp = to_uqp_grp(ibqp);
 551
 552        /* TODO: Future Support All States */
 553        mutex_lock(&qp_grp->vf->pf->usdev_lock);
 554        if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
 555                status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
 556        } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
 557                status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
 558        } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
 559                status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
 560        } else {
 561                usnic_err("Unexpected combination mask: %u state: %u\n",
 562                                attr_mask & IB_QP_STATE, attr->qp_state);
 563                status = -EINVAL;
 564        }
 565
 566        mutex_unlock(&qp_grp->vf->pf->usdev_lock);
 567        return status;
 568}
 569
 570struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
 571                                        int vector, struct ib_ucontext *context,
 572                                        struct ib_udata *udata)
 573{
 574        struct ib_cq *cq;
 575
 576        usnic_dbg("\n");
 577        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
 578        if (!cq)
 579                return ERR_PTR(-EBUSY);
 580
 581        return cq;
 582}
 583
/*
 * ib_device destroy_cq callback: the CQ is just a kernel-side
 * placeholder (see usnic_ib_create_cq), so only free it.
 */
int usnic_ib_destroy_cq(struct ib_cq *cq)
{
        usnic_dbg("\n");
        kfree(cq);
        return 0;
}
 590
 591struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
 592                                        u64 virt_addr, int access_flags,
 593                                        struct ib_udata *udata)
 594{
 595        struct usnic_ib_mr *mr;
 596        int err;
 597
 598        usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
 599                        virt_addr, length);
 600
 601        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 602        if (IS_ERR_OR_NULL(mr))
 603                return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM);
 604
 605        mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
 606                                        access_flags, 0);
 607        if (IS_ERR_OR_NULL(mr->umem)) {
 608                err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
 609                goto err_free;
 610        }
 611
 612        mr->ibmr.lkey = mr->ibmr.rkey = 0;
 613        return &mr->ibmr;
 614
 615err_free:
 616        kfree(mr);
 617        return ERR_PTR(err);
 618}
 619
/*
 * ib_device dereg_mr callback: release the pinned uiom region (passing
 * the ucontext's "closing" flag so teardown can take the cheaper path)
 * and free the wrapper.  Always returns 0.
 */
int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct usnic_ib_mr *mr = to_umr(ibmr);

        usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

        usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
        kfree(mr);
        return 0;
}
 630
 631struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
 632                                                        struct ib_udata *udata)
 633{
 634        struct usnic_ib_ucontext *context;
 635        struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
 636        usnic_dbg("\n");
 637
 638        context = kmalloc(sizeof(*context), GFP_KERNEL);
 639        if (!context)
 640                return ERR_PTR(-ENOMEM);
 641
 642        INIT_LIST_HEAD(&context->qp_grp_list);
 643        mutex_lock(&us_ibdev->usdev_lock);
 644        list_add_tail(&context->link, &us_ibdev->ctx_list);
 645        mutex_unlock(&us_ibdev->usdev_lock);
 646
 647        return &context->ibucontext;
 648}
 649
/*
 * ib_device dealloc_ucontext callback: unlink the context from the
 * device and free it.  All QPs must already be gone -- the verbs layer
 * destroys them before the ucontext, so a non-empty qp_grp_list here
 * is a driver bug (BUG_ON).
 */
int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
        struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
        usnic_dbg("\n");

        mutex_lock(&us_ibdev->usdev_lock);
        BUG_ON(!list_empty(&context->qp_grp_list));
        list_del(&context->link);
        mutex_unlock(&us_ibdev->usdev_lock);
        kfree(context);
        return 0;
}
 663
/*
 * ib_device mmap callback: map a VF's BAR0 into userspace.  The VF id
 * is encoded in vma->vm_pgoff; the request is only honoured if the
 * calling ucontext owns a qp_grp on that VF (preventing a process from
 * mapping registers of a VF it has no QP on) and the requested length
 * matches BAR0 exactly.
 *
 * Returns 0 on success, -EINVAL on length mismatch or unknown VF, or
 * the remap_pfn_range() error.
 */
int usnic_ib_mmap(struct ib_ucontext *context,
                                struct vm_area_struct *vma)
{
        struct usnic_ib_ucontext *uctx = to_ucontext(context);
        struct usnic_ib_dev *us_ibdev;
        struct usnic_ib_qp_grp *qp_grp;
        struct usnic_ib_vf *vf;
        struct vnic_dev_bar *bar;
        dma_addr_t bus_addr;
        unsigned int len;
        unsigned int vfid;

        usnic_dbg("\n");

        us_ibdev = to_usdev(context->device);
        /* Device registers: uncached I/O mapping */
        vma->vm_flags |= VM_IO;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vfid = vma->vm_pgoff;
        usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
                        vma->vm_pgoff, PAGE_SHIFT, vfid);

        mutex_lock(&us_ibdev->usdev_lock);
        /* Only allow mapping a VF this context actually has a QP on */
        list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
                vf = qp_grp->vf;
                if (usnic_vnic_get_index(vf->vnic) == vfid) {
                        bar = usnic_vnic_get_bar(vf->vnic, 0);
                        if ((vma->vm_end - vma->vm_start) != bar->len) {
                                usnic_err("Bar0 Len %lu - Request map %lu\n",
                                                bar->len,
                                                vma->vm_end - vma->vm_start);
                                mutex_unlock(&us_ibdev->usdev_lock);
                                return -EINVAL;
                        }
                        bus_addr = bar->bus_addr;
                        len = bar->len;
                        usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
                                        &bus_addr, bar->vaddr, bar->len);
                        /* Drop the lock before the (potentially slow)
                         * remap; bar addr/len were snapshotted above. */
                        mutex_unlock(&us_ibdev->usdev_lock);

                        return remap_pfn_range(vma,
                                                vma->vm_start,
                                                bus_addr >> PAGE_SHIFT,
                                                len, vma->vm_page_prot);
                }
        }

        mutex_unlock(&us_ibdev->usdev_lock);
        usnic_err("No VF %u found\n", vfid);
        return -EINVAL;
}
 714
 715/* In ib callbacks section -  Start of stub funcs */
/* Stub: address handles are not supported; always fails with -EPERM. */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
                                        struct ib_ah_attr *ah_attr)
{
        usnic_dbg("\n");
        return ERR_PTR(-EPERM);
}
 722
/* Stub: no AHs can exist (create_ah always fails); always -EINVAL. */
int usnic_ib_destroy_ah(struct ib_ah *ah)
{
        usnic_dbg("\n");
        return -EINVAL;
}
 728
/* Stub: the kernel never posts sends -- the data path lives in
 * userspace; always -EINVAL. */
int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                struct ib_send_wr **bad_wr)
{
        usnic_dbg("\n");
        return -EINVAL;
}
 735
/* Stub: the kernel never posts receives -- the data path lives in
 * userspace; always -EINVAL. */
int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                                struct ib_recv_wr **bad_wr)
{
        usnic_dbg("\n");
        return -EINVAL;
}
 742
/* Stub: CQs are polled from userspace, never via this verb; always
 * -EINVAL. */
int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
                                struct ib_wc *wc)
{
        usnic_dbg("\n");
        return -EINVAL;
}
 749
/* Stub: CQ event notification is not supported; always -EINVAL. */
int usnic_ib_req_notify_cq(struct ib_cq *cq,
                                        enum ib_cq_notify_flags flags)
{
        usnic_dbg("\n");
        return -EINVAL;
}
 756
/* Stub: DMA MRs are not supported; always fails with -ENOMEM. */
struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        usnic_dbg("\n");
        return ERR_PTR(-ENOMEM);
}
 762
 763
 764/* In ib callbacks section - End of stub funcs */
 765/* End of ib callbacks section */
 766