linux/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "usnic_log.h"
#include "usnic_vnic.h"
#include "usnic_fwd.h"
#include "usnic_uiom.h"
#include "usnic_debugfs.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_ib_sysfs.h"
#include "usnic_transport.h"

#define DFLT_RQ_IDX     0

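/* Map an ib_qp_state value to a short name for log and dump output. */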
const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return "Rst";
        case IB_QPS_INIT:
                return "Init";
        case IB_QPS_RTR:
                return "RTR";
        case IB_QPS_RTS:
                return "RTS";
        case IB_QPS_SQD:
                return "SQD";
        case IB_QPS_SQE:
                return "SQE";
        case IB_QPS_ERR:
                return "ERR";
        default:
                return "UNKNOWN STATE";
        }
}

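/*
 * Dump helpers: usnic_ib_qp_grp_dump_hdr() formats the column header and
 * usnic_ib_qp_grp_dump_rows() formats one row describing a QP group (QPN,
 * state, owner PID, VF index and the filter id of the group's default flow).
 */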
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
        return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}

int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
        struct usnic_ib_qp_grp *qp_grp = obj;
        struct usnic_ib_qp_grp_flow *default_flow;

        if (obj) {
                default_flow = list_first_entry(&qp_grp->flows_lst,
                                        struct usnic_ib_qp_grp_flow, link);
                return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
                                        qp_grp->ibqp.qp_num,
                                        usnic_ib_qp_grp_state_to_string(
                                                        qp_grp->state),
                                        qp_grp->owner_pid,
                                        usnic_vnic_get_index(qp_grp->vf->vnic),
                                        default_flow->flow->flow_id);
        } else {
                return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
        }
}

static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
        lockdep_assert_held(&qp_grp->lock);
        /*
         * The QP res chunk, used to derive qp indices, is just the RQ
         * chunk: the qp indices are the indices of the group's RQs.
         */
        return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}

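/*
 * Enable every RQ backing this QP group on the forwarding device.  If any
 * RQ fails to enable, the ones already enabled are rolled back.
 */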
static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
        int status;
        int i, vnic_idx;
        struct usnic_vnic_res_chunk *res_chunk;
        struct usnic_vnic_res *res;

        lockdep_assert_held(&qp_grp->lock);

        vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

        res_chunk = get_qp_res_chunk(qp_grp);
        if (IS_ERR(res_chunk)) {
                usnic_err("Unable to get qp res with err %ld\n",
                                PTR_ERR(res_chunk));
                return PTR_ERR(res_chunk);
        }

        for (i = 0; i < res_chunk->cnt; i++) {
                res = res_chunk->res[i];
                status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
                                                res->vnic_idx);
                if (status) {
                        usnic_err("Failed to enable qp %d of %s:%d with err %d\n",
                                        res->vnic_idx, qp_grp->ufdev->name,
                                        vnic_idx, status);
                        goto out_err;
                }
        }

        return 0;

out_err:
        for (i--; i >= 0; i--) {
                res = res_chunk->res[i];
                usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
                                        res->vnic_idx);
        }

        return status;
}

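/*
 * Disable every RQ backing this QP group on the forwarding device.  All RQs
 * are attempted; failures are logged and the status of the last call is
 * returned.
 */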
static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
{
        int i, vnic_idx;
        struct usnic_vnic_res_chunk *res_chunk;
        struct usnic_vnic_res *res;
        int status = 0;

        lockdep_assert_held(&qp_grp->lock);
        vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);

        res_chunk = get_qp_res_chunk(qp_grp);
        if (IS_ERR(res_chunk)) {
                usnic_err("Unable to get qp res with err %ld\n",
                        PTR_ERR(res_chunk));
                return PTR_ERR(res_chunk);
        }

        for (i = 0; i < res_chunk->cnt; i++) {
                res = res_chunk->res[i];
                status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
                                                res->vnic_idx);
                if (status) {
                        usnic_err("Failed to disable rq %d of %s:%d with err %d\n",
                                        res->vnic_idx,
                                        qp_grp->ufdev->name,
                                        vnic_idx, status);
                }
        }

        return status;
}

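/*
 * Build the filter action for this QP group: steer matching packets to the
 * group's default RQ (DFLT_RQ_IDX) on its vNIC.
 */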
static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
                                struct usnic_filter_action *uaction)
{
        struct usnic_vnic_res_chunk *res_chunk;

        res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
        if (IS_ERR(res_chunk)) {
                usnic_err("Unable to get %s with err %ld\n",
                        usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
                        PTR_ERR(res_chunk));
                return PTR_ERR(res_chunk);
        }

        uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
        uaction->action.type = FILTER_ACTION_RQ_STEERING;
        uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

        return 0;
}

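/*
 * Reserve the requested ROCE port, program a usNIC filter for it on the
 * forwarding device and wrap the resulting flow in a qp_grp_flow.  The port
 * is released again on any failure.
 */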
static struct usnic_ib_qp_grp_flow*
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
                        struct usnic_transport_spec *trans_spec)
{
        uint16_t port_num;
        int err;
        struct filter filter;
        struct usnic_filter_action uaction;
        struct usnic_ib_qp_grp_flow *qp_flow;
        struct usnic_fwd_flow *flow;
        enum usnic_transport_type trans_type;

        trans_type = trans_spec->trans_type;
        port_num = trans_spec->usnic_roce.port_num;

        /* Reserve Port */
        port_num = usnic_transport_rsrv_port(trans_type, port_num);
        if (port_num == 0)
                return ERR_PTR(-EINVAL);

        /* Create Flow */
        usnic_fwd_init_usnic_filter(&filter, port_num);
        err = init_filter_action(qp_grp, &uaction);
        if (err)
                goto out_unreserve_port;

        flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
        if (IS_ERR_OR_NULL(flow)) {
                err = flow ? PTR_ERR(flow) : -EFAULT;
                goto out_unreserve_port;
        }

        /* Create Flow Handle */
        qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
        if (!qp_flow) {
                err = -ENOMEM;
                goto out_dealloc_flow;
        }
        qp_flow->flow = flow;
        qp_flow->trans_type = trans_type;
        qp_flow->usnic_roce.port_num = port_num;
        qp_flow->qp_grp = qp_grp;
        return qp_flow;

out_dealloc_flow:
        usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
        usnic_transport_unrsrv_port(trans_type, port_num);
        return ERR_PTR(err);
}

static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
        usnic_fwd_dealloc_flow(qp_flow->flow);
        usnic_transport_unrsrv_port(qp_flow->trans_type,
                                        qp_flow->usnic_roce.port_num);
        kfree(qp_flow);
}

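/*
 * Resolve the caller-supplied socket fd, verify it is bound to a UDP
 * address, and install a UDP filter steering that address/port to the
 * group's default RQ.  The socket reference is dropped on failure.
 */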
static struct usnic_ib_qp_grp_flow*
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
                struct usnic_transport_spec *trans_spec)
{
        struct socket *sock;
        int sock_fd;
        int err;
        struct filter filter;
        struct usnic_filter_action uaction;
        struct usnic_ib_qp_grp_flow *qp_flow;
        struct usnic_fwd_flow *flow;
        enum usnic_transport_type trans_type;
        uint32_t addr;
        uint16_t port_num;
        int proto;

        trans_type = trans_spec->trans_type;
        sock_fd = trans_spec->udp.sock_fd;

        /* Get and check socket */
        sock = usnic_transport_get_socket(sock_fd);
        if (IS_ERR_OR_NULL(sock))
                return ERR_CAST(sock);

        err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
        if (err)
                goto out_put_sock;

        if (proto != IPPROTO_UDP) {
                usnic_err("Protocol for fd %d is not UDP", sock_fd);
                err = -EPERM;
                goto out_put_sock;
        }

        /* Create flow */
        usnic_fwd_init_udp_filter(&filter, addr, port_num);
        err = init_filter_action(qp_grp, &uaction);
        if (err)
                goto out_put_sock;

        flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
        if (IS_ERR_OR_NULL(flow)) {
                err = flow ? PTR_ERR(flow) : -EFAULT;
                goto out_put_sock;
        }

        /* Create qp_flow */
        qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
        if (!qp_flow) {
                err = -ENOMEM;
                goto out_dealloc_flow;
        }
        qp_flow->flow = flow;
        qp_flow->trans_type = trans_type;
        qp_flow->udp.sock = sock;
        qp_flow->qp_grp = qp_grp;
        return qp_flow;

out_dealloc_flow:
        usnic_fwd_dealloc_flow(flow);
out_put_sock:
        usnic_transport_put_socket(sock);
        return ERR_PTR(err);
}

static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
        usnic_fwd_dealloc_flow(qp_flow->flow);
        usnic_transport_put_socket(qp_flow->udp.sock);
        kfree(qp_flow);
}

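/*
 * Create a flow for the given transport spec, link it into the QP group's
 * flow list and expose it through debugfs.
 */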
static struct usnic_ib_qp_grp_flow*
create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
                        struct usnic_transport_spec *trans_spec)
{
        struct usnic_ib_qp_grp_flow *qp_flow;
        enum usnic_transport_type trans_type;

        trans_type = trans_spec->trans_type;
        switch (trans_type) {
        case USNIC_TRANSPORT_ROCE_CUSTOM:
                qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
                break;
        case USNIC_TRANSPORT_IPV4_UDP:
                qp_flow = create_udp_flow(qp_grp, trans_spec);
                break;
        default:
                usnic_err("Unsupported transport %u\n",
                                trans_spec->trans_type);
                return ERR_PTR(-EINVAL);
        }

        if (!IS_ERR_OR_NULL(qp_flow)) {
                list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
                usnic_debugfs_flow_add(qp_flow);
        }

        return qp_flow;
}

static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
        usnic_debugfs_flow_remove(qp_flow);
        list_del(&qp_flow->link);

        switch (qp_flow->trans_type) {
        case USNIC_TRANSPORT_ROCE_CUSTOM:
                release_roce_custom_flow(qp_flow);
                break;
        case USNIC_TRANSPORT_IPV4_UDP:
                release_udp_flow(qp_flow);
                break;
        default:
                WARN(1, "Unsupported transport %u\n",
                                qp_flow->trans_type);
                break;
        }
}

static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
        struct usnic_ib_qp_grp_flow *qp_flow, *tmp;

        list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
                release_and_remove_flow(qp_flow);
}

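/*
 * Transition a QP group between IB QP states.  Moving to INIT may add a new
 * flow (data optionally carries a usnic_transport_spec), RTR enables the
 * RQs, and RESET/ERR tear down flows and disable the RQs as needed.  The
 * group's state is only updated if the transition succeeds.
 */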
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
                                enum ib_qp_state new_state,
                                void *data)
{
        int status = 0;
        struct ib_event ib_event;
        enum ib_qp_state old_state;
        struct usnic_transport_spec *trans_spec;
        struct usnic_ib_qp_grp_flow *qp_flow;

        old_state = qp_grp->state;
        trans_spec = (struct usnic_transport_spec *) data;

        spin_lock(&qp_grp->lock);
        switch (new_state) {
        case IB_QPS_RESET:
                switch (old_state) {
                case IB_QPS_RESET:
                        /* NO-OP */
                        break;
                case IB_QPS_INIT:
                        release_and_remove_all_flows(qp_grp);
                        status = 0;
                        break;
                case IB_QPS_RTR:
                case IB_QPS_RTS:
                case IB_QPS_ERR:
                        status = disable_qp_grp(qp_grp);
                        release_and_remove_all_flows(qp_grp);
                        break;
                default:
                        status = -EINVAL;
                }
                break;
        case IB_QPS_INIT:
                switch (old_state) {
                case IB_QPS_RESET:
                        if (trans_spec) {
                                qp_flow = create_and_add_flow(qp_grp,
                                                                trans_spec);
                                if (IS_ERR_OR_NULL(qp_flow)) {
                                        status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
                                        break;
                                }
                        } else {
                                /*
                                 * Optional to specify filters.
                                 */
                                status = 0;
                        }
                        break;
                case IB_QPS_INIT:
                        if (trans_spec) {
                                qp_flow = create_and_add_flow(qp_grp,
                                                                trans_spec);
                                if (IS_ERR_OR_NULL(qp_flow)) {
                                        status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
                                        break;
                                }
                        } else {
                                /*
                                 * Doesn't make sense to go into INIT state
                                 * from INIT state w/o adding filters.
                                 */
                                status = -EINVAL;
                        }
                        break;
                case IB_QPS_RTR:
                        status = disable_qp_grp(qp_grp);
                        break;
                case IB_QPS_RTS:
                        status = disable_qp_grp(qp_grp);
                        break;
                default:
                        status = -EINVAL;
                }
                break;
        case IB_QPS_RTR:
                switch (old_state) {
                case IB_QPS_INIT:
                        status = enable_qp_grp(qp_grp);
                        break;
                default:
                        status = -EINVAL;
                }
                break;
        case IB_QPS_RTS:
                switch (old_state) {
                case IB_QPS_RTR:
                        /* NO-OP FOR NOW */
                        break;
                default:
                        status = -EINVAL;
                }
                break;
        case IB_QPS_ERR:
                ib_event.device = &qp_grp->vf->pf->ib_dev;
                ib_event.element.qp = &qp_grp->ibqp;
                ib_event.event = IB_EVENT_QP_FATAL;

                switch (old_state) {
                case IB_QPS_RESET:
                        qp_grp->ibqp.event_handler(&ib_event,
                                        qp_grp->ibqp.qp_context);
                        break;
                case IB_QPS_INIT:
                        release_and_remove_all_flows(qp_grp);
                        qp_grp->ibqp.event_handler(&ib_event,
                                        qp_grp->ibqp.qp_context);
                        break;
                case IB_QPS_RTR:
                case IB_QPS_RTS:
                        status = disable_qp_grp(qp_grp);
                        release_and_remove_all_flows(qp_grp);
                        qp_grp->ibqp.event_handler(&ib_event,
                                        qp_grp->ibqp.qp_context);
                        break;
                default:
                        status = -EINVAL;
                }
                break;
        default:
                status = -EINVAL;
        }
        spin_unlock(&qp_grp->lock);

        if (!status) {
                qp_grp->state = new_state;
                usnic_info("Transitioned %u from %s to %s",
                           qp_grp->grp_id,
                           usnic_ib_qp_grp_state_to_string(old_state),
                           usnic_ib_qp_grp_state_to_string(new_state));
        } else {
                usnic_err("Failed to transition %u from %s to %s",
                          qp_grp->grp_id,
                          usnic_ib_qp_grp_state_to_string(old_state),
                          usnic_ib_qp_grp_state_to_string(new_state));
        }

        return status;
}

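/*
 * Allocate a NULL-terminated array of vNIC resource chunks, one per entry
 * in the resource spec, claiming the resources from the given vNIC.
 */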
static struct usnic_vnic_res_chunk**
alloc_res_chunk_list(struct usnic_vnic *vnic,
                        struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
        enum usnic_vnic_res_type res_type;
        struct usnic_vnic_res_chunk **res_chunk_list;
        int err, i, res_cnt, res_lst_sz;

        for (res_lst_sz = 0;
                res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
                res_lst_sz++) {
                /* Do Nothing */
        }

        res_chunk_list = kcalloc(res_lst_sz + 1, sizeof(*res_chunk_list),
                                        GFP_ATOMIC);
        if (!res_chunk_list)
                return ERR_PTR(-ENOMEM);

        for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
                i++) {
                res_type = res_spec->resources[i].type;
                res_cnt = res_spec->resources[i].cnt;

                res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
                                        res_cnt, owner_obj);
                if (IS_ERR_OR_NULL(res_chunk_list[i])) {
                        err = res_chunk_list[i] ?
                                        PTR_ERR(res_chunk_list[i]) : -ENOMEM;
                        usnic_err("Failed to get %s from %s with err %d\n",
                                usnic_vnic_res_type_to_str(res_type),
                                usnic_vnic_pci_name(vnic),
                                err);
                        goto out_free_res;
                }
        }

        return res_chunk_list;

out_free_res:
        for (i--; i >= 0; i--)
                usnic_vnic_put_resources(res_chunk_list[i]);
        kfree(res_chunk_list);
        return ERR_PTR(err);
}

static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
        int i;

        for (i = 0; res_chunk_list[i]; i++)
                usnic_vnic_put_resources(res_chunk_list[i]);
        kfree(res_chunk_list);
}

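/*
 * Bind a QP group to a VF.  The first group on a VF attaches the VF's PCI
 * device to the protection domain; later groups just take a reference.
 */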
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
                                struct usnic_ib_pd *pd,
                                struct usnic_ib_qp_grp *qp_grp)
{
        int err;
        struct pci_dev *pdev;

        lockdep_assert_held(&vf->lock);

        pdev = usnic_vnic_get_pdev(vf->vnic);
        if (vf->qp_grp_ref_cnt == 0) {
                err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
                if (err) {
                        usnic_err("Failed to attach %s to domain\n",
                                        pci_name(pdev));
                        return err;
                }
                vf->pd = pd;
        }
        vf->qp_grp_ref_cnt++;

        WARN_ON(vf->pd != pd);
        qp_grp->vf = vf;

        return 0;
}

static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
        struct pci_dev *pdev;
        struct usnic_ib_pd *pd;

        lockdep_assert_held(&qp_grp->vf->lock);

        pd = qp_grp->vf->pd;
        pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
        if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
                qp_grp->vf->pd = NULL;
                usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
        }
        qp_grp->vf = NULL;
}

static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
        char buf[512];

        usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
        usnic_dbg("%s\n", buf);
}

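/*
 * Derive the QP group id (used as the IB QP number) from the flow: the
 * reserved ROCE port or the bound UDP port, depending on transport.
 */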
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
                                uint32_t *id)
{
        enum usnic_transport_type trans_type = qp_flow->trans_type;
        int err;
        uint16_t port_num = 0;

        switch (trans_type) {
        case USNIC_TRANSPORT_ROCE_CUSTOM:
                *id = qp_flow->usnic_roce.port_num;
                break;
        case USNIC_TRANSPORT_IPV4_UDP:
                err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
                                                        NULL, NULL,
                                                        &port_num);
                if (err)
                        return err;
                /*
                 * Copy port_num to stack first and then to *id,
                 * so that the short to int cast works for little
                 * and big endian systems.
                 */
                *id = port_num;
                break;
        default:
                usnic_err("Unsupported transport %u\n", trans_type);
                return -EINVAL;
        }

        return 0;
}

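/*
 * Allocate a QP group: claim vNIC resources per res_spec, bind the group to
 * the VF and PD, create its first flow from transport_spec and register the
 * group with sysfs.  Called with the VF lock held.
 */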
struct usnic_ib_qp_grp *
usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
                        struct usnic_ib_pd *pd,
                        struct usnic_vnic_res_spec *res_spec,
                        struct usnic_transport_spec *transport_spec)
{
        struct usnic_ib_qp_grp *qp_grp;
        int err;
        enum usnic_transport_type transport = transport_spec->trans_type;
        struct usnic_ib_qp_grp_flow *qp_flow;

        lockdep_assert_held(&vf->lock);

        err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
                                                res_spec);
        if (err) {
                usnic_err("Spec does not meet minimum req for transport %d\n",
                                transport);
                log_spec(res_spec);
                return ERR_PTR(err);
        }

        qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
        if (!qp_grp)
                return NULL;

        qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
                                                        qp_grp);
        if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
                err = qp_grp->res_chunk_list ?
                                PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
                goto out_free_qp_grp;
        }

        err = qp_grp_and_vf_bind(vf, pd, qp_grp);
        if (err)
                goto out_free_res;

        INIT_LIST_HEAD(&qp_grp->flows_lst);
        spin_lock_init(&qp_grp->lock);
        qp_grp->ufdev = ufdev;
        qp_grp->state = IB_QPS_RESET;
        qp_grp->owner_pid = current->pid;

        qp_flow = create_and_add_flow(qp_grp, transport_spec);
        if (IS_ERR_OR_NULL(qp_flow)) {
                usnic_err("Unable to create and add flow with err %ld\n",
                                PTR_ERR(qp_flow));
                err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
                goto out_qp_grp_vf_unbind;
        }

        err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
        if (err)
                goto out_release_flow;
        qp_grp->ibqp.qp_num = qp_grp->grp_id;

        usnic_ib_sysfs_qpn_add(qp_grp);

        return qp_grp;

out_release_flow:
        release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
        qp_grp_and_vf_unbind(qp_grp);
out_free_res:
        free_qp_grp_res(qp_grp->res_chunk_list);
out_free_qp_grp:
        kfree(qp_grp);

        return ERR_PTR(err);
}

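/*
 * Tear down a QP group: the group must already be in RESET and the VF lock
 * must be held.  Releases flows, the sysfs entry, the VF binding and the
 * vNIC resources.
 */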
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
        WARN_ON(qp_grp->state != IB_QPS_RESET);
        lockdep_assert_held(&qp_grp->vf->lock);

        release_and_remove_all_flows(qp_grp);
        usnic_ib_sysfs_qpn_remove(qp_grp);
        qp_grp_and_vf_unbind(qp_grp);
        free_qp_grp_res(qp_grp->res_chunk_list);
        kfree(qp_grp);
}

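/*
 * Return the QP group's resource chunk of the requested type, or
 * ERR_PTR(-EINVAL) if the group owns no resources of that type.
 */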
struct usnic_vnic_res_chunk*
usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
                                enum usnic_vnic_res_type res_type)
{
        int i;

        for (i = 0; qp_grp->res_chunk_list[i]; i++) {
                if (qp_grp->res_chunk_list[i]->type == res_type)
                        return qp_grp->res_chunk_list[i];
        }

        return ERR_PTR(-EINVAL);
}