linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        u8 port;
};

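/*
 * Common header embedded at the start of every tracked resource.  Each
 * resource is keyed by res_id in a per-type rb-tree and linked into the
 * owning slave's per-type list; owner and state drive the ownership and
 * state-machine checks throughout this file.
 */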
struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        u8                      sched_queue;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

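/*
 * rb-tree helpers for the resource tracker.  Both expect the tracker
 * spinlock (mlx4_tlock) to be held by the caller.
 */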
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

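/*
 * Set up the master's resource tracker: one list head per resource type
 * per slave, plus an empty rb-tree root per resource type, all protected
 * by a single spinlock.
 */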
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY)
                        for (i = 0; i < dev->num_slaves; i++)
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

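/*
 * Rewrite the pkey index in a wrapped QP context: derive the port from
 * the sched_queue byte and replace the slave's virtual pkey index with
 * the physical one from the virt2phys_pkey table.
 */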
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                        qp_ctx->pri_path.mgid_index = slave & 0x7F;
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                        qp_ctx->alt_path.mgid_index = slave & 0x7F;
        }
}

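/*
 * Apply the per-VF vport policy (default vlan, link state, QoS and
 * spoof-check) to a wrapped QP context before it is handed to firmware.
 */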
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
                if (MLX4_QP_ST_RC == qp_type ||
                    (MLX4_QP_ST_UD == qp_type &&
                     !mlx4_is_qp_reserved(dev, qpn)))
                        return -EINVAL;

                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd */
                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
        return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

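/*
 * Look up a resource, verify it is owned by @slave and not already busy,
 * then mark it busy so no other flow can move it.  Callers release it
 * again with put_res(), which restores the saved state.
 */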
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

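/*
 * The alloc_*_tr() helpers below allocate a tracker entry of the proper
 * concrete type for each resource class, starting it in its initial
 * state.  They are dispatched from alloc_tr().
 */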
static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

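/* Allocate a tracker entry of the given type and record its owner. */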
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

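/*
 * Create tracker entries for the range [base, base + count) and insert
 * them into the rb-tree and the slave's list atomically; on any
 * collision or insert failure the whole range is rolled back.
 */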
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

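/*
 * The remove_*_ok() helpers check that a resource is in a state from
 * which it may be freed: not busy, not referenced, and back in its
 * initial (reserved/allocated) state.
 */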
static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
                       __func__, __LINE__,
                       mtt_states_str(res->com.state),
                       atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

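/*
 * Remove tracker entries for [base, base + count).  All entries are
 * validated (ownership and removable state) before any of them is
 * erased, so the operation is all-or-nothing.
 */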
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

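/*
 * The *_res_start_move_to() helpers begin a state transition: they
 * validate the move, save the from/to states and park the resource in
 * the BUSY state.  The move is committed with res_end_move() or rolled
 * back with res_abort_move().
 */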
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_CQ_BUSY:
                        err = -EBUSY;
                        break;

                case RES_CQ_ALLOCATED:
                        if (r->com.state != RES_CQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        else
                                err = 0;
                        break;

                case RES_CQ_HW:
                        if (r->com.state != RES_CQ_ALLOCATED)
                                err = -EINVAL;
                        else
                                err = 0;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_CQ_BUSY;
                        if (cq)
                                *cq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_SRQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_SRQ_ALLOCATED:
                        if (r->com.state != RES_SRQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        break;

                case RES_SRQ_HW:
                        if (r->com.state != RES_SRQ_ALLOCATED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_SRQ_BUSY;
                        if (srq)
                                *srq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

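/* Roll back or commit a transition started by *_res_start_move_to(). */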
static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

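/*
 * Per-type ALLOC_RES handlers, dispatched from mlx4_ALLOC_RES_wrapper().
 * For QPs, RES_OP_RESERVE reserves a qpn range and RES_OP_MAP_ICM maps
 * ICM memory for one qpn; MPTs follow the same two-step scheme, while
 * MTTs, CQs and SRQs reserve and map in a single step.
 */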
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param);
                align = get_param_h(&in_param);
                err = __mlx4_qp_reserve_range(dev, count, align, &base);
                if (err)
                        return err;

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        order = get_param_l(&in_param);
        base = __mlx4_alloc_mtt_range(dev, order);
        if (base == -1)
                return -ENOMEM;

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
        if (err)
                __mlx4_free_mtt_range(dev, base, order);
        else
                set_param_l(out_param, base);

        return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = __mlx4_mpt_reserve(dev);
                if (index == -1)
                        break;
                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                if (err) {
                        __mlx4_mpt_release(dev, index);
                        break;
                }
                set_param_l(out_param, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);
                if (err)
                        return err;

                err = __mlx4_mpt_alloc_icm(dev, mpt->key);
                if (err) {
                        res_abort_move(dev, slave, RES_MPT, id);
                        return err;
                }

                res_end_move(dev, slave, RES_MPT, id);
                break;
        }
        return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_cq_alloc_icm(dev, &cqn);
                if (err)
                        break;

                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
                if (err) {
                        __mlx4_cq_free_icm(dev, cqn);
                        break;
                }

                set_param_l(out_param, cqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int srqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = __mlx4_srq_alloc_icm(dev, &srqn);
                if (err)
                        break;

                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
                if (err) {
                        __mlx4_srq_free_icm(dev, srqn);
                        break;
                }

                set_param_l(out_param, srqn);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

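/*
 * MAC registrations are tracked in a simple per-slave list rather than
 * in the rb-tree; rem_slave_macs() unregisters anything left over when
 * a slave goes away.
 */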
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct mac_res *res;

        res = kzalloc(sizeof *res, GFP_KERNEL);
        if (!res)
                return -ENOMEM;
        res->mac = mac;
        res->port = (u8) port;
        list_add_tail(&res->list,
                      &tracker->slave_list[slave].res_list[RES_MAC]);
        return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
                               int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                if (res->mac == mac && res->port == (u8) port) {
                        list_del(&res->list);
                        kfree(res);
                        break;
                }
        }
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct list_head *mac_list =
                &tracker->slave_list[slave].res_list[RES_MAC];
        struct mac_res *res, *tmp;

        list_for_each_entry_safe(res, tmp, mac_list, list) {
                list_del(&res->list);
                __mlx4_unregister_mac(dev, res->port, res->mac);
                kfree(res);
        }
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int port;
        u64 mac;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        port = get_param_l(out_param);
        mac = in_param;

        err = __mlx4_register_mac(dev, port, mac);
        if (err >= 0) {
                set_param_l(out_param, err);
                err = 0;
        }

        if (!err) {
                err = mac_add_to_slave(dev, slave, mac, port);
                if (err)
                        __mlx4_unregister_mac(dev, port, mac);
        }
        return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                          u64 in_param, u64 *out_param)
{
        return 0;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param)
{
        u32 index;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_counter_alloc(dev, &index);
        if (err)
                return err;

        err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
        if (err)
                __mlx4_counter_free(dev, index);
        else
                set_param_l(out_param, index);

        return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                           u64 in_param, u64 *out_param)
{
        u32 xrcdn;
        int err;

        if (op != RES_OP_RESERVE)
                return -EINVAL;

        err = __mlx4_xrcd_alloc(dev, &xrcdn);
        if (err)
                return err;

        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
        if (err)
                __mlx4_xrcd_free(dev, xrcdn);
        else
                set_param_l(out_param, xrcdn);

        return err;
}

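/*
 * Command wrapper: dispatch a slave's ALLOC_RES command to the per-type
 * handler selected by the in_modifier; op_modifier carries the RES_OP_*
 * sub-operation.
 */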
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
                           struct mlx4_vhcr *vhcr,
                           struct mlx4_cmd_mailbox *inbox,
                           struct mlx4_cmd_mailbox *outbox,
                           struct mlx4_cmd_info *cmd)
{
        int err;
        int alop = vhcr->op_modifier;

        switch (vhcr->in_modifier) {
        case RES_QP:
                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MTT:
                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MPT:
                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_CQ:
                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                   vhcr->in_param, &vhcr->out_param);
                break;

        case RES_SRQ:
                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_MAC:
                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                    vhcr->in_param, &vhcr->out_param);
                break;

        case RES_VLAN:
                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                     vhcr->in_param, &vhcr->out_param);
                break;

        case RES_COUNTER:
                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                        vhcr->in_param, &vhcr->out_param);
                break;

        case RES_XRCD:
                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
                                      vhcr->in_param, &vhcr->out_param);
                break;

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

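/*
 * Per-type FREE_RES handlers: undo the corresponding allocation,
 * removing the tracker entry and releasing the underlying resource.
 */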
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                       u64 in_param)
{
        int err;
        int count;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                base = get_param_l(&in_param) & 0x7fffff;
                count = get_param_h(&in_param);
                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
                if (err)
                        break;
                __mlx4_qp_release_range(dev, base, count);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
                                           NULL, 0);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn))
                        __mlx4_qp_free_icm(dev, qpn);

                res_end_move(dev, slave, RES_QP, qpn);

                if (valid_reserved(dev, slave, qpn))
                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        base = get_param_l(&in_param);
        order = get_param_h(&in_param);
        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
        if (!err)
                __mlx4_free_mtt_range(dev, base, order);
        return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = get_res(dev, slave, id, RES_MPT, &mpt);
                if (err)
                        break;
                index = mpt->key;
                put_res(dev, slave, id, RES_MPT);

                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
                if (err)
                        break;
                __mlx4_mpt_release(dev, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_RESERVED, &mpt);
                if (err)
                        return err;

                __mlx4_mpt_free_icm(dev, mpt->key);
                res_end_move(dev, slave, RES_MPT, id);
                return err;
1677        default:
1678                err = -EINVAL;
1679                break;
1680        }
1681        return err;
1682}
1683
1684static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1685                       u64 in_param, u64 *out_param)
1686{
1687        int cqn;
1688        int err;
1689
1690        switch (op) {
1691        case RES_OP_RESERVE_AND_MAP:
1692                cqn = get_param_l(&in_param);
1693                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1694                if (err)
1695                        break;
1696
1697                __mlx4_cq_free_icm(dev, cqn);
1698                break;
1699
1700        default:
1701                err = -EINVAL;
1702                break;
1703        }
1704
1705        return err;
1706}
1707
1708static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1709                        u64 in_param, u64 *out_param)
1710{
1711        int srqn;
1712        int err;
1713
1714        switch (op) {
1715        case RES_OP_RESERVE_AND_MAP:
1716                srqn = get_param_l(&in_param);
1717                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1718                if (err)
1719                        break;
1720
1721                __mlx4_srq_free_icm(dev, srqn);
1722                break;
1723
1724        default:
1725                err = -EINVAL;
1726                break;
1727        }
1728
1729        return err;
1730}
1731
1732static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1733                            u64 in_param, u64 *out_param)
1734{
1735        int port;
1736        int err = 0;
1737
1738        switch (op) {
1739        case RES_OP_RESERVE_AND_MAP:
1740                port = get_param_l(out_param);
1741                mac_del_from_slave(dev, slave, in_param, port);
1742                __mlx4_unregister_mac(dev, port, in_param);
1743                break;
1744        default:
1745                err = -EINVAL;
1746                break;
1747        }
1748
1749        return err;
1751}
1752
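    /*
     * VLAN registrations are not tracked per slave at this stage, so
     * there is nothing for the resource tracker to release here.
     */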
1753static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1754                            u64 in_param, u64 *out_param)
1755{
1756        return 0;
1757}
1758
1759static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1760                            u64 in_param, u64 *out_param)
1761{
1762        int index;
1763        int err;
1764
1765        if (op != RES_OP_RESERVE)
1766                return -EINVAL;
1767
1768        index = get_param_l(&in_param);
1769        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1770        if (err)
1771                return err;
1772
1773        __mlx4_counter_free(dev, index);
1774
1775        return err;
1776}
1777
1778static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1779                          u64 in_param, u64 *out_param)
1780{
1781        int xrcdn;
1782        int err;
1783
1784        if (op != RES_OP_RESERVE)
1785                return -EINVAL;
1786
1787        xrcdn = get_param_l(&in_param);
1788        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1789        if (err)
1790                return err;
1791
1792        __mlx4_xrcd_free(dev, xrcdn);
1793
1794        return err;
1795}
1796
1797int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
1798                          struct mlx4_vhcr *vhcr,
1799                          struct mlx4_cmd_mailbox *inbox,
1800                          struct mlx4_cmd_mailbox *outbox,
1801                          struct mlx4_cmd_info *cmd)
1802{
1803        int err = -EINVAL;
1804        int alop = vhcr->op_modifier;
1805
1806        switch (vhcr->in_modifier) {
1807        case RES_QP:
1808                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
1809                                  vhcr->in_param);
1810                break;
1811
1812        case RES_MTT:
1813                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
1814                                   vhcr->in_param, &vhcr->out_param);
1815                break;
1816
1817        case RES_MPT:
1818                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
1819                                   vhcr->in_param);
1820                break;
1821
1822        case RES_CQ:
1823                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
1824                                  vhcr->in_param, &vhcr->out_param);
1825                break;
1826
1827        case RES_SRQ:
1828                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
1829                                   vhcr->in_param, &vhcr->out_param);
1830                break;
1831
1832        case RES_MAC:
1833                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
1834                                   vhcr->in_param, &vhcr->out_param);
1835                break;
1836
1837        case RES_VLAN:
1838                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
1839                                   vhcr->in_param, &vhcr->out_param);
1840                break;
1841
1842        case RES_COUNTER:
1843                err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
1844                                       vhcr->in_param, &vhcr->out_param);
1845                break;
1846
1847        case RES_XRCD:
1848                err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
1849                                     vhcr->in_param, &vhcr->out_param);
                    break;
1850
1851        default:
1852                break;
1853        }
1854        return err;
1855}
1856
1857/* ugly but other choices are uglier */
1858static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
1859{
1860        return (be32_to_cpu(mpt->flags) >> 9) & 1;
1861}
1862
1863static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
1864{
1865        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
1866}
1867
1868static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
1869{
1870        return be32_to_cpu(mpt->mtt_sz);
1871}
1872
1873static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
1874{
1875        return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
1876}
1877
1878static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
1879{
1880        return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
1881}
1882
1883static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
1884{
1885        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
1886}
1887
1888static int mr_is_region(struct mlx4_mpt_entry *mpt)
1889{
1890        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
1891}
1892
1893static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
1894{
1895        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
1896}
1897
1898static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
1899{
1900        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
1901}
1902
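    /*
     * Compute how many MTT pages the QP described by @qpc requires: the
     * SQ and RQ byte sizes follow from their log sizes and strides (the
     * RQ contributes nothing for SRQ, RSS or XRC QPs); the sum, adjusted
     * for the page offset, is rounded up to a power-of-two page count.
     */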
1903static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
1904{
1905        int page_shift = (qpc->log_page_size & 0x3f) + 12;
1906        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
1907        int log_sq_stride = qpc->sq_size_stride & 7;
1908        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
1909        int log_rq_stride = qpc->rq_size_stride & 7;
1910        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
1911        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
1912        int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
1913        int sq_size;
1914        int rq_size;
1915        int total_pages;
1916        int total_mem;
1917        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
1918
1919        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
1920        rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
1921        total_mem = sq_size + rq_size;
1922        total_pages =
1923                roundup_pow_of_two((total_mem + (page_offset << 6)) >>
1924                                   page_shift);
1925
1926        return total_pages;
1927}
1928
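    /*
     * A slave may only address MTT entries within a range it owns,
     * i.e. [res_id, res_id + (1 << order)); anything else is a
     * permission error.
     */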
1929static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
1930                           int size, struct res_mtt *mtt)
1931{
1932        int res_start = mtt->com.res_id;
1933        int res_size = (1 << mtt->order);
1934
1935        if (start < res_start || start + size > res_start + res_size)
1936                return -EPERM;
1937        return 0;
1938}
1939
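    /*
     * SW2HW_MPT passes an MPT into hardware ownership. Before forwarding
     * the command to firmware, validate the entry the slave built:
     * memory windows and bind-enabled FMRs are refused, the PD bits that
     * encode a function number must be zero or match the slave, and a
     * non-physical MPT must point at an MTT range the slave owns.
     */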
1940int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
1941                           struct mlx4_vhcr *vhcr,
1942                           struct mlx4_cmd_mailbox *inbox,
1943                           struct mlx4_cmd_mailbox *outbox,
1944                           struct mlx4_cmd_info *cmd)
1945{
1946        int err;
1947        int index = vhcr->in_modifier;
1948        struct res_mtt *mtt;
1949        struct res_mpt *mpt;
1950        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
1951        int phys;
1952        int id;
1953        u32 pd;
1954        int pd_slave;
1955
1956        id = index & mpt_mask(dev);
1957        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
1958        if (err)
1959                return err;
1960
1961        /* Disable memory windows for VFs. */
1962        if (!mr_is_region(inbox->buf)) {
1963                err = -EPERM;
1964                goto ex_abort;
1965        }
1966
1967        /* Make sure that the PD bits related to the slave id are zeros. */
1968        pd = mr_get_pd(inbox->buf);
1969        pd_slave = (pd >> 17) & 0x7f;
1970        if (pd_slave != 0 && pd_slave != slave) {
1971                err = -EPERM;
1972                goto ex_abort;
1973        }
1974
1975        if (mr_is_fmr(inbox->buf)) {
1976                /* FMR and Bind Enable are forbidden in slave devices. */
1977                if (mr_is_bind_enabled(inbox->buf)) {
1978                        err = -EPERM;
1979                        goto ex_abort;
1980                }
1981                /* FMR and Memory Windows are also forbidden. */
1982                if (!mr_is_region(inbox->buf)) {
1983                        err = -EPERM;
1984                        goto ex_abort;
1985                }
1986        }
1987
1988        phys = mr_phys_mpt(inbox->buf);
1989        if (!phys) {
1990                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
1991                if (err)
1992                        goto ex_abort;
1993
1994                err = check_mtt_range(dev, slave, mtt_base,
1995                                      mr_get_mtt_size(inbox->buf), mtt);
1996                if (err)
1997                        goto ex_put;
1998
1999                mpt->mtt = mtt;
2000        }
2001
2002        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2003        if (err)
2004                goto ex_put;
2005
2006        if (!phys) {
2007                atomic_inc(&mtt->ref_count);
2008                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2009        }
2010
2011        res_end_move(dev, slave, RES_MPT, id);
2012        return 0;
2013
2014ex_put:
2015        if (!phys)
2016                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2017ex_abort:
2018        res_abort_move(dev, slave, RES_MPT, id);
2019
2020        return err;
2021}
2022
2023int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2024                           struct mlx4_vhcr *vhcr,
2025                           struct mlx4_cmd_mailbox *inbox,
2026                           struct mlx4_cmd_mailbox *outbox,
2027                           struct mlx4_cmd_info *cmd)
2028{
2029        int err;
2030        int index = vhcr->in_modifier;
2031        struct res_mpt *mpt;
2032        int id;
2033
2034        id = index & mpt_mask(dev);
2035        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2036        if (err)
2037                return err;
2038
2039        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2040        if (err)
2041                goto ex_abort;
2042
2043        if (mpt->mtt)
2044                atomic_dec(&mpt->mtt->ref_count);
2045
2046        res_end_move(dev, slave, RES_MPT, id);
2047        return 0;
2048
2049ex_abort:
2050        res_abort_move(dev, slave, RES_MPT, id);
2051
2052        return err;
2053}
2054
2055int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2056                           struct mlx4_vhcr *vhcr,
2057                           struct mlx4_cmd_mailbox *inbox,
2058                           struct mlx4_cmd_mailbox *outbox,
2059                           struct mlx4_cmd_info *cmd)
2060{
2061        int err;
2062        int index = vhcr->in_modifier;
2063        struct res_mpt *mpt;
2064        int id;
2065
2066        id = index & mpt_mask(dev);
2067        err = get_res(dev, slave, id, RES_MPT, &mpt);
2068        if (err)
2069                return err;
2070
2071        if (mpt->com.from_state != RES_MPT_HW) {
2072                err = -EBUSY;
2073                goto out;
2074        }
2075
2076        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2077
2078out:
2079        put_res(dev, slave, id, RES_MPT);
2080        return err;
2081}
2082
2083static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2084{
2085        return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2086}
2087
2088static int qp_get_scqn(struct mlx4_qp_context *qpc)
2089{
2090        return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2091}
2092
2093static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2094{
2095        return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2096}
2097
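    /*
     * Special QP traffic of slaves is para-virtualized through proxy and
     * tunnel QPs; those QPs use a qkey assigned by the master, which is
     * patched into the context here. Other QPs are left untouched.
     */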
2098static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2099                                  struct mlx4_qp_context *context)
2100{
2101        u32 qpn = vhcr->in_modifier & 0xffffff;
2102        u32 qkey = 0;
2103
2104        if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2105                return;
2106
2107        /* adjust qkey in qp context */
2108        context->qkey = cpu_to_be32(qkey);
2109}
2110
2111int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2112                             struct mlx4_vhcr *vhcr,
2113                             struct mlx4_cmd_mailbox *inbox,
2114                             struct mlx4_cmd_mailbox *outbox,
2115                             struct mlx4_cmd_info *cmd)
2116{
2117        int err;
2118        int qpn = vhcr->in_modifier & 0x7fffff;
2119        struct res_mtt *mtt;
2120        struct res_qp *qp;
2121        struct mlx4_qp_context *qpc = inbox->buf + 8;
2122        int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2123        int mtt_size = qp_get_mtt_size(qpc);
2124        struct res_cq *rcq;
2125        struct res_cq *scq;
2126        int rcqn = qp_get_rcqn(qpc);
2127        int scqn = qp_get_scqn(qpc);
2128        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2129        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2130        struct res_srq *srq;
2131        int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2132
2133        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2134        if (err)
2135                return err;
2136        qp->local_qpn = local_qpn;
2137        qp->sched_queue = 0;
2138        qp->qpc_flags = be32_to_cpu(qpc->flags);
2139
2140        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2141        if (err)
2142                goto ex_abort;
2143
2144        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2145        if (err)
2146                goto ex_put_mtt;
2147
2148        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2149        if (err)
2150                goto ex_put_mtt;
2151
2152        if (scqn != rcqn) {
2153                err = get_res(dev, slave, scqn, RES_CQ, &scq);
2154                if (err)
2155                        goto ex_put_rcq;
2156        } else {
2157                scq = rcq;
            }
2158
2159        if (use_srq) {
2160                err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2161                if (err)
2162                        goto ex_put_scq;
2163        }
2164
2165        adjust_proxy_tun_qkey(dev, vhcr, qpc);
2166        update_pkey_index(dev, slave, inbox);
2167        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2168        if (err)
2169                goto ex_put_srq;
2170        atomic_inc(&mtt->ref_count);
2171        qp->mtt = mtt;
2172        atomic_inc(&rcq->ref_count);
2173        qp->rcq = rcq;
2174        atomic_inc(&scq->ref_count);
2175        qp->scq = scq;
2176
2177        if (scqn != rcqn)
2178                put_res(dev, slave, scqn, RES_CQ);
2179
2180        if (use_srq) {
2181                atomic_inc(&srq->ref_count);
2182                put_res(dev, slave, srqn, RES_SRQ);
2183                qp->srq = srq;
2184        }
2185        put_res(dev, slave, rcqn, RES_CQ);
2186        put_res(dev, slave, mtt_base, RES_MTT);
2187        res_end_move(dev, slave, RES_QP, qpn);
2188
2189        return 0;
2190
2191ex_put_srq:
2192        if (use_srq)
2193                put_res(dev, slave, srqn, RES_SRQ);
2194ex_put_scq:
2195        if (scqn != rcqn)
2196                put_res(dev, slave, scqn, RES_CQ);
2197ex_put_rcq:
2198        put_res(dev, slave, rcqn, RES_CQ);
2199ex_put_mtt:
2200        put_res(dev, slave, mtt_base, RES_MTT);
2201ex_abort:
2202        res_abort_move(dev, slave, RES_QP, qpn);
2203
2204        return err;
2205}
2206
2207static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2208{
2209        return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2210}
2211
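    /*
     * EQ entries are 32 bytes, so an EQ with 2^log_eq_size entries
     * occupies 2^(log_eq_size + 5) bytes; return that as a page count,
     * with a minimum of one page.
     */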
2212static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2213{
2214        int log_eq_size = eqc->log_eq_size & 0x1f;
2215        int page_shift = (eqc->log_page_size & 0x3f) + 12;
2216
2217        if (log_eq_size + 5 < page_shift)
2218                return 1;
2219
2220        return 1 << (log_eq_size + 5 - page_shift);
2221}
2222
2223static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2224{
2225        return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2226}
2227
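    /* Same computation as for EQs: CQ entries are also 32 bytes. */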
2228static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2229{
2230        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2231        int page_shift = (cqc->log_page_size & 0x3f) + 12;
2232
2233        if (log_cq_size + 5 < page_shift)
2234                return 1;
2235
2236        return 1 << (log_cq_size + 5 - page_shift);
2237}
2238
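    /*
     * EQ numbers are local to each function, so the tracker ID of an EQ
     * embeds its owner: res_id = (slave << 8) | eqn.
     */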
2239int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2240                          struct mlx4_vhcr *vhcr,
2241                          struct mlx4_cmd_mailbox *inbox,
2242                          struct mlx4_cmd_mailbox *outbox,
2243                          struct mlx4_cmd_info *cmd)
2244{
2245        int err;
2246        int eqn = vhcr->in_modifier;
2247        int res_id = (slave << 8) | eqn;
2248        struct mlx4_eq_context *eqc = inbox->buf;
2249        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2250        int mtt_size = eq_get_mtt_size(eqc);
2251        struct res_eq *eq;
2252        struct res_mtt *mtt;
2253
2254        err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2255        if (err)
2256                return err;
2257        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2258        if (err)
2259                goto out_add;
2260
2261        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2262        if (err)
2263                goto out_move;
2264
2265        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2266        if (err)
2267                goto out_put;
2268
2269        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2270        if (err)
2271                goto out_put;
2272
2273        atomic_inc(&mtt->ref_count);
2274        eq->mtt = mtt;
2275        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2276        res_end_move(dev, slave, RES_EQ, res_id);
2277        return 0;
2278
2279out_put:
2280        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2281out_move:
2282        res_abort_move(dev, slave, RES_EQ, res_id);
2283out_add:
2284        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2285        return err;
2286}
2287
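    /*
     * Find the slave-owned MTT reservation that fully contains
     * [start, start + len) and mark it busy so its state cannot change
     * while the caller writes to it; the caller releases it with
     * put_res().
     */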
2288static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2289                              int len, struct res_mtt **res)
2290{
2291        struct mlx4_priv *priv = mlx4_priv(dev);
2292        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2293        struct res_mtt *mtt;
2294        int err = -EINVAL;
2295
2296        spin_lock_irq(mlx4_tlock(dev));
2297        list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2298                            com.list) {
2299                if (!check_mtt_range(dev, slave, start, len, mtt)) {
2300                        *res = mtt;
2301                        mtt->com.from_state = mtt->com.state;
2302                        mtt->com.state = RES_MTT_BUSY;
2303                        err = 0;
2304                        break;
2305                }
2306        }
2307        spin_unlock_irq(mlx4_tlock(dev));
2308
2309        return err;
2310}
2311
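    /*
     * Sanity-check a QP state transition requested through the virtual
     * HCR. For RC/UC QPs, any function other than the master may only
     * use GID index 0 in the primary and alternate address paths.
     */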
2312static int verify_qp_parameters(struct mlx4_dev *dev,
2313                                struct mlx4_cmd_mailbox *inbox,
2314                                enum qp_transition transition, u8 slave)
2315{
2316        u32                     qp_type;
2317        struct mlx4_qp_context  *qp_ctx;
2318        enum mlx4_qp_optpar     optpar;
2319
2320        qp_ctx  = inbox->buf + 8;
2321        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2322        optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2323
2324        switch (qp_type) {
2325        case MLX4_QP_ST_RC:
2326        case MLX4_QP_ST_UC:
2327                switch (transition) {
2328                case QP_TRANS_INIT2RTR:
2329                case QP_TRANS_RTR2RTS:
2330                case QP_TRANS_RTS2RTS:
2331                case QP_TRANS_SQD2SQD:
2332                case QP_TRANS_SQD2RTS:
2333                        if (slave != mlx4_master_func_num(dev)) {
2334                                /* slaves have only gid index 0 */
2335                                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2336                                        if (qp_ctx->pri_path.mgid_index)
2337                                                return -EINVAL;
2338                                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2339                                        if (qp_ctx->alt_path.mgid_index)
2340                                                return -EINVAL;
                            }
2341                        break;
2342                default:
2343                        break;
2344                }
2345
2346                break;
2347        default:
2348                break;
2349        }
2350
2351        return 0;
2352}
2353
2354int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2355                           struct mlx4_vhcr *vhcr,
2356                           struct mlx4_cmd_mailbox *inbox,
2357                           struct mlx4_cmd_mailbox *outbox,
2358                           struct mlx4_cmd_info *cmd)
2359{
2360        struct mlx4_mtt mtt;
2361        __be64 *page_list = inbox->buf;
2362        u64 *pg_list = (u64 *)page_list;
2363        int i;
2364        struct res_mtt *rmtt = NULL;
2365        int start = be64_to_cpu(page_list[0]);
2366        int npages = vhcr->in_modifier;
2367        int err;
2368
2369        err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2370        if (err)
2371                return err;
2372
2373        /* Call the SW implementation of write_mtt:
2374         * - Prepare a dummy mtt struct
2375         * - Translate inbox contents to simple addresses in host endianness */
2376        mtt.offset = 0;  /* TBD: offset handling is not implemented, but this
2377                            path never uses the offset */
2378        mtt.order = 0;
2379        mtt.page_shift = 0;
2380        for (i = 0; i < npages; ++i)
2381                pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2382
2383        err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2384                               ((u64 *)page_list + 2));
2385
2386        if (rmtt)
2387                put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2388
2389        return err;
2390}
2391
2392int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2393                          struct mlx4_vhcr *vhcr,
2394                          struct mlx4_cmd_mailbox *inbox,
2395                          struct mlx4_cmd_mailbox *outbox,
2396                          struct mlx4_cmd_info *cmd)
2397{
2398        int eqn = vhcr->in_modifier;
2399        int res_id = eqn | (slave << 8);
2400        struct res_eq *eq;
2401        int err;
2402
2403        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2404        if (err)
2405                return err;
2406
2407        err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2408        if (err)
2409                goto ex_abort;
2410
2411        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2412        if (err)
2413                goto ex_put;
2414
2415        atomic_dec(&eq->mtt->ref_count);
2416        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2417        res_end_move(dev, slave, RES_EQ, res_id);
2418        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2419
2420        return 0;
2421
2422ex_put:
2423        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2424ex_abort:
2425        res_abort_move(dev, slave, RES_EQ, res_id);
2426
2427        return err;
2428}
2429
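    /*
     * Post an event to a slave's event queue on the master's behalf: if
     * the slave registered an EQ for this event type and that EQ is in
     * hardware ownership, copy the EQE into a mailbox and issue the
     * GEN_EQE firmware command targeting that slave/EQ pair.
     */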
2430int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2431{
2432        struct mlx4_priv *priv = mlx4_priv(dev);
2433        struct mlx4_slave_event_eq_info *event_eq;
2434        struct mlx4_cmd_mailbox *mailbox;
2435        u32 in_modifier = 0;
2436        int err;
2437        int res_id;
2438        struct res_eq *req;
2439
2440        if (!priv->mfunc.master.slave_state)
2441                return -EINVAL;
2442
2443        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2444
2445        /* Create the event only if the slave is registered */
2446        if (event_eq->eqn < 0)
2447                return 0;
2448
2449        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2450        res_id = (slave << 8) | event_eq->eqn;
2451        err = get_res(dev, slave, res_id, RES_EQ, &req);
2452        if (err)
2453                goto unlock;
2454
2455        if (req->com.from_state != RES_EQ_HW) {
2456                err = -EINVAL;
2457                goto put;
2458        }
2459
2460        mailbox = mlx4_alloc_cmd_mailbox(dev);
2461        if (IS_ERR(mailbox)) {
2462                err = PTR_ERR(mailbox);
2463                goto put;
2464        }
2465
2466        if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2467                ++event_eq->token;
2468                eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2469        }
2470
2471        memcpy(mailbox->buf, (u8 *) eqe, 28);
2472
2473        in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2474
2475        err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2476                       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2477                       MLX4_CMD_NATIVE);
2478
2479        put_res(dev, slave, res_id, RES_EQ);
2480        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2481        mlx4_free_cmd_mailbox(dev, mailbox);
2482        return err;
2483
2484put:
2485        put_res(dev, slave, res_id, RES_EQ);
2486
2487unlock:
2488        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2489        return err;
2490}
2491
2492int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2493                          struct mlx4_vhcr *vhcr,
2494                          struct mlx4_cmd_mailbox *inbox,
2495                          struct mlx4_cmd_mailbox *outbox,
2496                          struct mlx4_cmd_info *cmd)
2497{
2498        int eqn = vhcr->in_modifier;
2499        int res_id = eqn | (slave << 8);
2500        struct res_eq *eq;
2501        int err;
2502
2503        err = get_res(dev, slave, res_id, RES_EQ, &eq);
2504        if (err)
2505                return err;
2506
2507        if (eq->com.from_state != RES_EQ_HW) {
2508                err = -EINVAL;
2509                goto ex_put;
2510        }
2511
2512        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2513
2514ex_put:
2515        put_res(dev, slave, res_id, RES_EQ);
2516        return err;
2517}
2518
2519int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2520                          struct mlx4_vhcr *vhcr,
2521                          struct mlx4_cmd_mailbox *inbox,
2522                          struct mlx4_cmd_mailbox *outbox,
2523                          struct mlx4_cmd_info *cmd)
2524{
2525        int err;
2526        int cqn = vhcr->in_modifier;
2527        struct mlx4_cq_context *cqc = inbox->buf;
2528        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2529        struct res_cq *cq;
2530        struct res_mtt *mtt;
2531
2532        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2533        if (err)
2534                return err;
2535        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2536        if (err)
2537                goto out_move;
2538        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2539        if (err)
2540                goto out_put;
2541        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2542        if (err)
2543                goto out_put;
2544        atomic_inc(&mtt->ref_count);
2545        cq->mtt = mtt;
2546        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2547        res_end_move(dev, slave, RES_CQ, cqn);
2548        return 0;
2549
2550out_put:
2551        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2552out_move:
2553        res_abort_move(dev, slave, RES_CQ, cqn);
2554        return err;
2555}
2556
2557int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2558                          struct mlx4_vhcr *vhcr,
2559                          struct mlx4_cmd_mailbox *inbox,
2560                          struct mlx4_cmd_mailbox *outbox,
2561                          struct mlx4_cmd_info *cmd)
2562{
2563        int err;
2564        int cqn = vhcr->in_modifier;
2565        struct res_cq *cq;
2566
2567        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
2568        if (err)
2569                return err;
2570        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2571        if (err)
2572                goto out_move;
2573        atomic_dec(&cq->mtt->ref_count);
2574        res_end_move(dev, slave, RES_CQ, cqn);
2575        return 0;
2576
2577out_move:
2578        res_abort_move(dev, slave, RES_CQ, cqn);
2579        return err;
2580}
2581
2582int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2583                          struct mlx4_vhcr *vhcr,
2584                          struct mlx4_cmd_mailbox *inbox,
2585                          struct mlx4_cmd_mailbox *outbox,
2586                          struct mlx4_cmd_info *cmd)
2587{
2588        int cqn = vhcr->in_modifier;
2589        struct res_cq *cq;
2590        int err;
2591
2592        err = get_res(dev, slave, cqn, RES_CQ, &cq);
2593        if (err)
2594                return err;
2595
2596        if (cq->com.from_state != RES_CQ_HW)
2597                goto ex_put;
2598
2599        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2600ex_put:
2601        put_res(dev, slave, cqn, RES_CQ);
2602
2603        return err;
2604}
2605
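    /*
     * MODIFY_CQ with op_modifier 0 is a CQ resize: verify the CQ still
     * references its original MTT, validate the replacement MTT range,
     * and on success move the MTT reference from the old range to the
     * new one.
     */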
2606static int handle_resize(struct mlx4_dev *dev, int slave,
2607                         struct mlx4_vhcr *vhcr,
2608                         struct mlx4_cmd_mailbox *inbox,
2609                         struct mlx4_cmd_mailbox *outbox,
2610                         struct mlx4_cmd_info *cmd,
2611                         struct res_cq *cq)
2612{
2613        int err;
2614        struct res_mtt *orig_mtt;
2615        struct res_mtt *mtt;
2616        struct mlx4_cq_context *cqc = inbox->buf;
2617        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2618
2619        err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
2620        if (err)
2621                return err;
2622
2623        if (orig_mtt != cq->mtt) {
2624                err = -EINVAL;
2625                goto ex_put;
2626        }
2627
2628        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2629        if (err)
2630                goto ex_put;
2631
2632        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2633        if (err)
2634                goto ex_put1;
2635        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2636        if (err)
2637                goto ex_put1;
2638        atomic_dec(&orig_mtt->ref_count);
2639        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2640        atomic_inc(&mtt->ref_count);
2641        cq->mtt = mtt;
2642        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2643        return 0;
2644
2645ex_put1:
2646        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2647ex_put:
2648        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
2649
2650        return err;
2652}
2653
2654int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
2655                           struct mlx4_vhcr *vhcr,
2656                           struct mlx4_cmd_mailbox *inbox,
2657                           struct mlx4_cmd_mailbox *outbox,
2658                           struct mlx4_cmd_info *cmd)
2659{
2660        int cqn = vhcr->in_modifier;
2661        struct res_cq *cq;
2662        int err;
2663
2664        err = get_res(dev, slave, cqn, RES_CQ, &cq);
2665        if (err)
2666                return err;
2667
2668        if (cq->com.from_state != RES_CQ_HW)
2669                goto ex_put;
2670
2671        if (vhcr->op_modifier == 0) {
2672                err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
2673                goto ex_put;
2674        }
2675
2676        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2677ex_put:
2678        put_res(dev, slave, cqn, RES_CQ);
2679
2680        return err;
2681}
2682
2683static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
2684{
2685        int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
2686        int log_rq_stride = srqc->logstride & 7;
2687        int page_shift = (srqc->log_page_size & 0x3f) + 12;
2688
2689        if (log_srq_size + log_rq_stride + 4 < page_shift)
2690                return 1;
2691
2692        return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
2693}
2694
2695int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2696                           struct mlx4_vhcr *vhcr,
2697                           struct mlx4_cmd_mailbox *inbox,
2698                           struct mlx4_cmd_mailbox *outbox,
2699                           struct mlx4_cmd_info *cmd)
2700{
2701        int err;
2702        int srqn = vhcr->in_modifier;
2703        struct res_mtt *mtt;
2704        struct res_srq *srq;
2705        struct mlx4_srq_context *srqc = inbox->buf;
2706        int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
2707
2708        if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
2709                return -EINVAL;
2710
2711        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
2712        if (err)
2713                return err;
2714        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2715        if (err)
2716                goto ex_abort;
2717        err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
2718                              mtt);
2719        if (err)
2720                goto ex_put_mtt;
2721
2722        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2723        if (err)
2724                goto ex_put_mtt;
2725
2726        atomic_inc(&mtt->ref_count);
2727        srq->mtt = mtt;
2728        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2729        res_end_move(dev, slave, RES_SRQ, srqn);
2730        return 0;
2731
2732ex_put_mtt:
2733        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2734ex_abort:
2735        res_abort_move(dev, slave, RES_SRQ, srqn);
2736
2737        return err;
2738}
2739
2740int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2741                           struct mlx4_vhcr *vhcr,
2742                           struct mlx4_cmd_mailbox *inbox,
2743                           struct mlx4_cmd_mailbox *outbox,
2744                           struct mlx4_cmd_info *cmd)
2745{
2746        int err;
2747        int srqn = vhcr->in_modifier;
2748        struct res_srq *srq;
2749
2750        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
2751        if (err)
2752                return err;
2753        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2754        if (err)
2755                goto ex_abort;
2756        atomic_dec(&srq->mtt->ref_count);
2757        if (srq->cq)
2758                atomic_dec(&srq->cq->ref_count);
2759        res_end_move(dev, slave, RES_SRQ, srqn);
2760
2761        return 0;
2762
2763ex_abort:
2764        res_abort_move(dev, slave, RES_SRQ, srqn);
2765
2766        return err;
2767}
2768
2769int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2770                           struct mlx4_vhcr *vhcr,
2771                           struct mlx4_cmd_mailbox *inbox,
2772                           struct mlx4_cmd_mailbox *outbox,
2773                           struct mlx4_cmd_info *cmd)
2774{
2775        int err;
2776        int srqn = vhcr->in_modifier;
2777        struct res_srq *srq;
2778
2779        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2780        if (err)
2781                return err;
2782        if (srq->com.from_state != RES_SRQ_HW) {
2783                err = -EBUSY;
2784                goto out;
2785        }
2786        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2787out:
2788        put_res(dev, slave, srqn, RES_SRQ);
2789        return err;
2790}
2791
2792int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
2793                         struct mlx4_vhcr *vhcr,
2794                         struct mlx4_cmd_mailbox *inbox,
2795                         struct mlx4_cmd_mailbox *outbox,
2796                         struct mlx4_cmd_info *cmd)
2797{
2798        int err;
2799        int srqn = vhcr->in_modifier;
2800        struct res_srq *srq;
2801
2802        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2803        if (err)
2804                return err;
2805
2806        if (srq->com.from_state != RES_SRQ_HW) {
2807                err = -EBUSY;
2808                goto out;
2809        }
2810
2811        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2812out:
2813        put_res(dev, slave, srqn, RES_SRQ);
2814        return err;
2815}
2816
2817int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
2818                        struct mlx4_vhcr *vhcr,
2819                        struct mlx4_cmd_mailbox *inbox,
2820                        struct mlx4_cmd_mailbox *outbox,
2821                        struct mlx4_cmd_info *cmd)
2822{
2823        int err;
2824        int qpn = vhcr->in_modifier & 0x7fffff;
2825        struct res_qp *qp;
2826
2827        err = get_res(dev, slave, qpn, RES_QP, &qp);
2828        if (err)
2829                return err;
2830        if (qp->com.from_state != RES_QP_HW) {
2831                err = -EBUSY;
2832                goto out;
2833        }
2834
2835        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2836out:
2837        put_res(dev, slave, qpn, RES_QP);
2838        return err;
2839}
2840
2841int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2842                              struct mlx4_vhcr *vhcr,
2843                              struct mlx4_cmd_mailbox *inbox,
2844                              struct mlx4_cmd_mailbox *outbox,
2845                              struct mlx4_cmd_info *cmd)
2846{
2847        struct mlx4_qp_context *context = inbox->buf + 8;

2848        adjust_proxy_tun_qkey(dev, vhcr, context);
2849        update_pkey_index(dev, slave, inbox);
2850        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2851}
2852
2853int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
2854                             struct mlx4_vhcr *vhcr,
2855                             struct mlx4_cmd_mailbox *inbox,
2856                             struct mlx4_cmd_mailbox *outbox,
2857                             struct mlx4_cmd_info *cmd)
2858{
2859        int err;
2860        struct mlx4_qp_context *qpc = inbox->buf + 8;
2861        int qpn = vhcr->in_modifier & 0x7fffff;
2862        struct res_qp *qp;
2863        u8 orig_sched_queue;
2864
2865        err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
2866        if (err)
2867                return err;
2868
2869        update_pkey_index(dev, slave, inbox);
2870        update_gid(dev, inbox, (u8)slave);
2871        adjust_proxy_tun_qkey(dev, vhcr, qpc);
2872        orig_sched_queue = qpc->pri_path.sched_queue;
2873        err = update_vport_qp_param(dev, inbox, slave, qpn);
2874        if (err)
2875                return err;
2876
2877        err = get_res(dev, slave, qpn, RES_QP, &qp);
2878        if (err)
2879                return err;
2880        if (qp->com.from_state != RES_QP_HW) {
2881                err = -EBUSY;
2882                goto out;
2883        }
2884
2885        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2886out:
2887        /* if no error, save sched queue value passed in by VF. This is
2888         * essentially the QOS value provided by the VF. This will be useful
2889         * if we allow dynamic changes from VST back to VGT
2890         */
2891        if (!err)
2892                qp->sched_queue = orig_sched_queue;
2893
2894        put_res(dev, slave, qpn, RES_QP);
2895        return err;
2896}
2897
2898int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2899                            struct mlx4_vhcr *vhcr,
2900                            struct mlx4_cmd_mailbox *inbox,
2901                            struct mlx4_cmd_mailbox *outbox,
2902                            struct mlx4_cmd_info *cmd)
2903{
2904        int err;
2905        struct mlx4_qp_context *context = inbox->buf + 8;
2906
2907        err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
2908        if (err)
2909                return err;
2910
2911        update_pkey_index(dev, slave, inbox);
2912        update_gid(dev, inbox, (u8)slave);
2913        adjust_proxy_tun_qkey(dev, vhcr, context);
2914        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2915}
2916
2917int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2918                            struct mlx4_vhcr *vhcr,
2919                            struct mlx4_cmd_mailbox *inbox,
2920                            struct mlx4_cmd_mailbox *outbox,
2921                            struct mlx4_cmd_info *cmd)
2922{
2923        int err;
2924        struct mlx4_qp_context *context = inbox->buf + 8;
2925
2926        err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
2927        if (err)
2928                return err;
2929
2930        update_pkey_index(dev, slave, inbox);
2931        update_gid(dev, inbox, (u8)slave);
2932        adjust_proxy_tun_qkey(dev, vhcr, context);
2933        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2934}
2935
2937int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2938                              struct mlx4_vhcr *vhcr,
2939                              struct mlx4_cmd_mailbox *inbox,
2940                              struct mlx4_cmd_mailbox *outbox,
2941                              struct mlx4_cmd_info *cmd)
2942{
2943        struct mlx4_qp_context *context = inbox->buf + 8;

2944        adjust_proxy_tun_qkey(dev, vhcr, context);
2945        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2946}
2947
2948int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
2949                            struct mlx4_vhcr *vhcr,
2950                            struct mlx4_cmd_mailbox *inbox,
2951                            struct mlx4_cmd_mailbox *outbox,
2952                            struct mlx4_cmd_info *cmd)
2953{
2954        int err;
2955        struct mlx4_qp_context *context = inbox->buf + 8;
2956
2957        err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
2958        if (err)
2959                return err;
2960
2961        adjust_proxy_tun_qkey(dev, vhcr, context);
2962        update_gid(dev, inbox, (u8)slave);
2963        update_pkey_index(dev, slave, inbox);
2964        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2965}
2966
2967int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
2968                            struct mlx4_vhcr *vhcr,
2969                            struct mlx4_cmd_mailbox *inbox,
2970                            struct mlx4_cmd_mailbox *outbox,
2971                            struct mlx4_cmd_info *cmd)
2972{
2973        int err;
2974        struct mlx4_qp_context *context = inbox->buf + 8;
2975
2976        err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
2977        if (err)
2978                return err;
2979
2980        adjust_proxy_tun_qkey(dev, vhcr, context);
2981        update_gid(dev, inbox, (u8)slave);
2982        update_pkey_index(dev, slave, inbox);
2983        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2984}
2985
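    /*
     * Moving a QP back to RESET drops the references on the MTT, the
     * CQs and (if present) the SRQ that were taken when the QP entered
     * hardware ownership in RST2INIT.
     */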
2986int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
2987                         struct mlx4_vhcr *vhcr,
2988                         struct mlx4_cmd_mailbox *inbox,
2989                         struct mlx4_cmd_mailbox *outbox,
2990                         struct mlx4_cmd_info *cmd)
2991{
2992        int err;
2993        int qpn = vhcr->in_modifier & 0x7fffff;
2994        struct res_qp *qp;
2995
2996        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
2997        if (err)
2998                return err;
2999        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3000        if (err)
3001                goto ex_abort;
3002
3003        atomic_dec(&qp->mtt->ref_count);
3004        atomic_dec(&qp->rcq->ref_count);
3005        atomic_dec(&qp->scq->ref_count);
3006        if (qp->srq)
3007                atomic_dec(&qp->srq->ref_count);
3008        res_end_move(dev, slave, RES_QP, qpn);
3009        return 0;
3010
3011ex_abort:
3012        res_abort_move(dev, slave, RES_QP, qpn);
3013
3014        return err;
3015}
3016
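    /*
     * Multicast groups attached to a slave's QP are remembered on the
     * QP's mcg_list so that detach requests can be validated and any
     * leftover attachments can be released when the slave's resources
     * are cleaned up.
     */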
3017static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3018                                struct res_qp *rqp, u8 *gid)
3019{
3020        struct res_gid *res;
3021
3022        list_for_each_entry(res, &rqp->mcg_list, list) {
3023                if (!memcmp(res->gid, gid, 16))
3024                        return res;
3025        }
3026        return NULL;
3027}
3028
3029static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3030                       u8 *gid, enum mlx4_protocol prot,
3031                       enum mlx4_steer_type steer, u64 reg_id)
3032{
3033        struct res_gid *res;
3034        int err;
3035
3036        res = kzalloc(sizeof *res, GFP_KERNEL);
3037        if (!res)
3038                return -ENOMEM;
3039
3040        spin_lock_irq(&rqp->mcg_spl);
3041        if (find_gid(dev, slave, rqp, gid)) {
3042                kfree(res);
3043                err = -EEXIST;
3044        } else {
3045                memcpy(res->gid, gid, 16);
3046                res->prot = prot;
3047                res->steer = steer;
3048                res->reg_id = reg_id;
3049                list_add_tail(&res->list, &rqp->mcg_list);
3050                err = 0;
3051        }
3052        spin_unlock_irq(&rqp->mcg_spl);
3053
3054        return err;
3055}
3056
3057static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3058                       u8 *gid, enum mlx4_protocol prot,
3059                       enum mlx4_steer_type steer, u64 *reg_id)
3060{
3061        struct res_gid *res;
3062        int err;
3063
3064        spin_lock_irq(&rqp->mcg_spl);
3065        res = find_gid(dev, slave, rqp, gid);
3066        if (!res || res->prot != prot || res->steer != steer) {
3067                err = -EINVAL;
3068        } else {
3069                *reg_id = res->reg_id;
3070                list_del(&res->list);
3071                kfree(res);
3072                err = 0;
3073        }
3074        spin_unlock_irq(&rqp->mcg_spl);
3075
3076        return err;
3077}
3078
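    /*
     * Dispatch the attach according to the device steering mode:
     * device-managed flow steering installs a flow rule identified by
     * *reg_id, while B0 steering attaches the GID directly to the QP.
     */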
3079static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3080                     int block_loopback, enum mlx4_protocol prot,
3081                     enum mlx4_steer_type type, u64 *reg_id)
3082{
3083        switch (dev->caps.steering_mode) {
3084        case MLX4_STEERING_MODE_DEVICE_MANAGED:
3085                return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3086                                                block_loopback, prot,
3087                                                reg_id);
3088        case MLX4_STEERING_MODE_B0:
3089                return mlx4_qp_attach_common(dev, qp, gid,
3090                                            block_loopback, prot, type);
3091        default:
3092                return -EINVAL;
3093        }
3094}
3095
3096static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3097                     enum mlx4_protocol prot, enum mlx4_steer_type type,
3098                     u64 reg_id)
3099{
3100        switch (dev->caps.steering_mode) {
3101        case MLX4_STEERING_MODE_DEVICE_MANAGED:
3102                return mlx4_flow_detach(dev, reg_id);
3103        case MLX4_STEERING_MODE_B0:
3104                return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3105        default:
3106                return -EINVAL;
3107        }
3108}
3109
3110int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3111                               struct mlx4_vhcr *vhcr,
3112                               struct mlx4_cmd_mailbox *inbox,
3113                               struct mlx4_cmd_mailbox *outbox,
3114                               struct mlx4_cmd_info *cmd)
3115{
3116        struct mlx4_qp qp; /* dummy for calling attach/detach */
3117        u8 *gid = inbox->buf;
3118        enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3119        int err;
3120        int qpn;
3121        struct res_qp *rqp;
3122        u64 reg_id = 0;
3123        int attach = vhcr->op_modifier;
3124        int block_loopback = vhcr->in_modifier >> 31;
3125        u8 steer_type_mask = 2;
3126        enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3127
3128        qpn = vhcr->in_modifier & 0xffffff;
3129        err = get_res(dev, slave, qpn, RES_QP, &rqp);
3130        if (err)
3131                return err;
3132
3133        qp.qpn = qpn;
3134        if (attach) {
3135                err = qp_attach(dev, &qp, gid, block_loopback, prot,
3136                                type, &reg_id);
3137                if (err) {
3138                        pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3139                        goto ex_put;
3140                }
3141                err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3142                if (err)
3143                        goto ex_detach;
3144        } else {
3145                err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3146                if (err)
3147                        goto ex_put;
3148
3149                err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3150                if (err)
3151                        pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3152                               qpn, reg_id);
3153        }
3154        put_res(dev, slave, qpn, RES_QP);
3155        return err;
3156
3157ex_detach:
3158        qp_detach(dev, &qp, gid, prot, type, reg_id);
3159ex_put:
3160        put_res(dev, slave, qpn, RES_QP);
3161        return err;
3162}
3163
3164/*
3165 * MAC validation for Flow Steering rules.
3166 * VF can attach rules only with a mac address which is assigned to it.
3167 */
3168static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3169                                   struct list_head *rlist)
3170{
3171        struct mac_res *res, *tmp;
3172        __be64 be_mac;
3173
3174        /* make sure it isn't a multicast or broadcast mac */
3175        if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3176            !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3177                list_for_each_entry_safe(res, tmp, rlist, list) {
3178                        be_mac = cpu_to_be64(res->mac << 16);
3179                        if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3180                                return 0;
3181                }
3182                pr_err("MAC %pM doesn't belong to VF %d; steering rule rejected\n",
3183                       eth_header->eth.dst_mac, slave);
3184                return -EINVAL;
3185        }
3186        return 0;
3187}
3188
3189/*
3190 * In case of missing eth header, append eth header with a MAC address
3191 * assigned to the VF.
3192 */
3193static int add_eth_header(struct mlx4_dev *dev, int slave,
3194                          struct mlx4_cmd_mailbox *inbox,
3195                          struct list_head *rlist, int header_id)
3196{
3197        struct mac_res *res, *tmp;
3198        u8 port;
3199        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3200        struct mlx4_net_trans_rule_hw_eth *eth_header;
3201        struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3202        struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3203        __be64 be_mac = 0;
3204        __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3205
3206        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3207        port = ctrl->port;
3208        eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3209
3210        /* Clear a space in the inbox for eth header */
3211        switch (header_id) {
3212        case MLX4_NET_TRANS_RULE_ID_IPV4:
3213                ip_header =
3214                        (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3215                memmove(ip_header, eth_header,
3216                        sizeof(*ip_header) + sizeof(*l4_header));
3217                break;
3218        case MLX4_NET_TRANS_RULE_ID_TCP:
3219        case MLX4_NET_TRANS_RULE_ID_UDP:
3220                l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3221                            (eth_header + 1);
3222                memmove(l4_header, eth_header, sizeof(*l4_header));
3223                break;
3224        default:
3225                return -EINVAL;
3226        }
3227        list_for_each_entry_safe(res, tmp, rlist, list) {
3228                if (port == res->port) {
3229                        be_mac = cpu_to_be64(res->mac << 16);
3230                        break;
3231                }
3232        }
3233        if (!be_mac) {
3234                pr_err("Failed adding eth header to FS rule; can't find matching MAC for port %d\n",
3235                       port);
3236                return -EINVAL;
3237        }
3238
3239        memset(eth_header, 0, sizeof(*eth_header));
3240        eth_header->size = sizeof(*eth_header) >> 2;
3241        eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3242        memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3243        memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3244
3245        return 0;
3247}
3248
3249int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3250                                         struct mlx4_vhcr *vhcr,
3251                                         struct mlx4_cmd_mailbox *inbox,
3252                                         struct mlx4_cmd_mailbox *outbox,
3253                                         struct mlx4_cmd_info *cmd)
3254{
3256        struct mlx4_priv *priv = mlx4_priv(dev);
3257        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3258        struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3259        int err;
3260        int qpn;
3261        struct res_qp *rqp;
3262        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3263        struct _rule_hw  *rule_header;
3264        int header_id;
3265
3266        if (dev->caps.steering_mode !=
3267            MLX4_STEERING_MODE_DEVICE_MANAGED)
3268                return -EOPNOTSUPP;
3269
3270        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3271        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3272        err = get_res(dev, slave, qpn, RES_QP, &rqp);
3273        if (err) {
3274                pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3275                return err;
3276        }
3277        rule_header = (struct _rule_hw *)(ctrl + 1);
3278        header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3279
3280        switch (header_id) {
3281        case MLX4_NET_TRANS_RULE_ID_ETH:
3282                if (validate_eth_header_mac(slave, rule_header, rlist)) {
3283                        err = -EINVAL;
3284                        goto err_put;
3285                }
3286                break;
3287        case MLX4_NET_TRANS_RULE_ID_IB:
3288                break;
3289        case MLX4_NET_TRANS_RULE_ID_IPV4:
3290        case MLX4_NET_TRANS_RULE_ID_TCP:
3291        case MLX4_NET_TRANS_RULE_ID_UDP:
3292                pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3293                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3294                        err = -EINVAL;
3295                        goto err_put;
3296                }
3297                vhcr->in_modifier +=
3298                        sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3299                break;
3300        default:
3301                pr_err("Corrupted mailbox: unrecognized rule header id 0x%x\n", be16_to_cpu(rule_header->id));
3302                err = -EINVAL;
3303                goto err_put;
3304        }
3305
3306        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3307                           vhcr->in_modifier, 0,
3308                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3309                           MLX4_CMD_NATIVE);
3310        if (err)
3311                goto err_put;
3312
3313        err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3314        if (err) {
3315                mlx4_err(dev, "Failed to add flow steering resources\n");
3316                /* detach rule*/
3317                mlx4_cmd(dev, vhcr->out_param, 0, 0,
3318                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3319                         MLX4_CMD_NATIVE);
3320                goto err_put;
3321        }
3322        atomic_inc(&rqp->ref_count);
3323err_put:
3324        put_res(dev, slave, qpn, RES_QP);
3325        return err;
3326}
3327
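/*
 * Detach a slave's flow-steering rule: remove it from the resource
 * tracker, issue the detach command to firmware, and drop the
 * reference the rule held on its QP.
 */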
3328int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3329                                         struct mlx4_vhcr *vhcr,
3330                                         struct mlx4_cmd_mailbox *inbox,
3331                                         struct mlx4_cmd_mailbox *outbox,
3332                                         struct mlx4_cmd_info *cmd)
3333{
3334        int err;
3335        struct res_qp *rqp;
3336        struct res_fs_rule *rrule;
3337
3338        if (dev->caps.steering_mode !=
3339            MLX4_STEERING_MODE_DEVICE_MANAGED)
3340                return -EOPNOTSUPP;
3341
3342        err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3343        if (err)
3344                return err;
3345        /* Release the rule from busy state before removal */
3346        put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3347        err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3348        if (err)
3349                return err;
3350
3351        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3352        if (err) {
3353                mlx4_err(dev, "Failed to remove flow steering resources\n");
3354                goto out;
3355        }
3356
3357        err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3358                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3359                       MLX4_CMD_NATIVE);
3360        if (!err)
3361                atomic_dec(&rqp->ref_count);
3362out:
3363        put_res(dev, slave, rrule->qpn, RES_QP);
3364        return err;
3365}
3366
3367enum {
3368        BUSY_MAX_RETRIES = 10
3369};
3370
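/*
 * Hold the counter busy in the resource tracker while its statistics
 * are DMA'd out, so it cannot be freed from under the query.
 */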
3371int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3372                               struct mlx4_vhcr *vhcr,
3373                               struct mlx4_cmd_mailbox *inbox,
3374                               struct mlx4_cmd_mailbox *outbox,
3375                               struct mlx4_cmd_info *cmd)
3376{
3377        int err;
3378        int index = vhcr->in_modifier & 0xffff;
3379
3380        err = get_res(dev, slave, index, RES_COUNTER, NULL);
3381        if (err)
3382                return err;
3383
3384        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3385        put_res(dev, slave, index, RES_COUNTER);
3386        return err;
3387}
3388
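/*
 * Release all multicast group attachments still held by a QP, using
 * the detach method that matches the device's steering mode.
 */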
3389static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3390{
3391        struct res_gid *rgid;
3392        struct res_gid *tmp;
3393        struct mlx4_qp qp; /* dummy for calling attach/detach */
3394
3395        list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3396                switch (dev->caps.steering_mode) {
3397                case MLX4_STEERING_MODE_DEVICE_MANAGED:
3398                        mlx4_flow_detach(dev, rgid->reg_id);
3399                        break;
3400                case MLX4_STEERING_MODE_B0:
3401                        qp.qpn = rqp->local_qpn;
3402                        (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3403                                                     rgid->prot, rgid->steer);
3404                        break;
3405                }
3406                list_del(&rgid->list);
3407                kfree(rgid);
3408        }
3409}
3410
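/*
 * Mark every resource of @type owned by @slave as busy and "removing"
 * so that no other flow can grab it while it is being destroyed.
 * Returns how many resources were already busy elsewhere and could not
 * be claimed on this pass; move_all_busy() below keeps retrying the
 * sweep for up to five seconds before reporting the leftovers.
 */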
3411static int _move_all_busy(struct mlx4_dev *dev, int slave,
3412                          enum mlx4_resource type, int print)
3413{
3414        struct mlx4_priv *priv = mlx4_priv(dev);
3415        struct mlx4_resource_tracker *tracker =
3416                &priv->mfunc.master.res_tracker;
3417        struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3418        struct res_common *r;
3419        struct res_common *tmp;
3420        int busy;
3421
3422        busy = 0;
3423        spin_lock_irq(mlx4_tlock(dev));
3424        list_for_each_entry_safe(r, tmp, rlist, list) {
3425                if (r->owner == slave) {
3426                        if (!r->removing) {
3427                                if (r->state == RES_ANY_BUSY) {
3428                                        if (print)
3429                                                mlx4_dbg(dev,
3430                                                         "%s id 0x%llx is busy\n",
3431                                                          ResourceType(type),
3432                                                          r->res_id);
3433                                        ++busy;
3434                                } else {
3435                                        r->from_state = r->state;
3436                                        r->state = RES_ANY_BUSY;
3437                                        r->removing = 1;
3438                                }
3439                        }
3440                }
3441        }
3442        spin_unlock_irq(mlx4_tlock(dev));
3443
3444        return busy;
3445}
3446
3447static int move_all_busy(struct mlx4_dev *dev, int slave,
3448                         enum mlx4_resource type)
3449{
3450        unsigned long begin;
3451        int busy;
3452
3453        begin = jiffies;
3454        do {
3455                busy = _move_all_busy(dev, slave, type, 0);
3456                if (time_after(jiffies, begin + 5 * HZ))
3457                        break;
3458                if (busy)
3459                        cond_resched();
3460        } while (busy);
3461
3462        if (busy)
3463                busy = _move_all_busy(dev, slave, type, 1);
3464
3465        return busy;
3466}
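
/*
 * Destroy the QPs a slave left behind by walking each one back down
 * its state ladder: RES_QP_HW moves the QP to reset and drops its CQ,
 * SRQ and MTT references, RES_QP_MAPPED frees the ICM backing, and
 * RES_QP_RESERVED removes the tracker entry itself.
 */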
3467static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3468{
3469        struct mlx4_priv *priv = mlx4_priv(dev);
3470        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3471        struct list_head *qp_list =
3472                &tracker->slave_list[slave].res_list[RES_QP];
3473        struct res_qp *qp;
3474        struct res_qp *tmp;
3475        int state;
3476        u64 in_param;
3477        int qpn;
3478        int err;
3479
3480        err = move_all_busy(dev, slave, RES_QP);
3481        if (err)
3482                mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
3483                          slave);
3484
3485        spin_lock_irq(mlx4_tlock(dev));
3486        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3487                spin_unlock_irq(mlx4_tlock(dev));
3488                if (qp->com.owner == slave) {
3489                        qpn = qp->com.res_id;
3490                        detach_qp(dev, slave, qp);
3491                        state = qp->com.from_state;
3492                        while (state != 0) {
3493                                switch (state) {
3494                                case RES_QP_RESERVED:
3495                                        spin_lock_irq(mlx4_tlock(dev));
3496                                        rb_erase(&qp->com.node,
3497                                                 &tracker->res_tree[RES_QP]);
3498                                        list_del(&qp->com.list);
3499                                        spin_unlock_irq(mlx4_tlock(dev));
3500                                        kfree(qp);
3501                                        state = 0;
3502                                        break;
3503                                case RES_QP_MAPPED:
3504                                        if (!valid_reserved(dev, slave, qpn))
3505                                                __mlx4_qp_free_icm(dev, qpn);
3506                                        state = RES_QP_RESERVED;
3507                                        break;
3508                                case RES_QP_HW:
3509                                        in_param = slave;
3510                                        err = mlx4_cmd(dev, in_param,
3511                                                       qp->local_qpn, 2,
3512                                                       MLX4_CMD_2RST_QP,
3513                                                       MLX4_CMD_TIME_CLASS_A,
3514                                                       MLX4_CMD_NATIVE);
3515                                        if (err)
3516                                                mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
3517                                                         slave, qp->local_qpn);
3520                                        atomic_dec(&qp->rcq->ref_count);
3521                                        atomic_dec(&qp->scq->ref_count);
3522                                        atomic_dec(&qp->mtt->ref_count);
3523                                        if (qp->srq)
3524                                                atomic_dec(&qp->srq->ref_count);
3525                                        state = RES_QP_MAPPED;
3526                                        break;
3527                                default:
3528                                        state = 0;
3529                                }
3530                        }
3531                }
3532                spin_lock_irq(mlx4_tlock(dev));
3533        }
3534        spin_unlock_irq(mlx4_tlock(dev));
3535}
3536
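/*
 * SRQ analogue of rem_slave_qps(): move each SRQ back to SW ownership,
 * then free its ICM backing and its tracker entry.
 */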
3537static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3538{
3539        struct mlx4_priv *priv = mlx4_priv(dev);
3540        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3541        struct list_head *srq_list =
3542                &tracker->slave_list[slave].res_list[RES_SRQ];
3543        struct res_srq *srq;
3544        struct res_srq *tmp;
3545        int state;
3546        u64 in_param;
3548        int srqn;
3549        int err;
3550
3551        err = move_all_busy(dev, slave, RES_SRQ);
3552        if (err)
3553                mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
3554                          slave);
3555
3556        spin_lock_irq(mlx4_tlock(dev));
3557        list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3558                spin_unlock_irq(mlx4_tlock(dev));
3559                if (srq->com.owner == slave) {
3560                        srqn = srq->com.res_id;
3561                        state = srq->com.from_state;
3562                        while (state != 0) {
3563                                switch (state) {
3564                                case RES_SRQ_ALLOCATED:
3565                                        __mlx4_srq_free_icm(dev, srqn);
3566                                        spin_lock_irq(mlx4_tlock(dev));
3567                                        rb_erase(&srq->com.node,
3568                                                 &tracker->res_tree[RES_SRQ]);
3569                                        list_del(&srq->com.list);
3570                                        spin_unlock_irq(mlx4_tlock(dev));
3571                                        kfree(srq);
3572                                        state = 0;
3573                                        break;
3574
3575                                case RES_SRQ_HW:
3576                                        in_param = slave;
3577                                        err = mlx4_cmd(dev, in_param, srqn, 1,
3578                                                       MLX4_CMD_HW2SW_SRQ,
3579                                                       MLX4_CMD_TIME_CLASS_A,
3580                                                       MLX4_CMD_NATIVE);
3581                                        if (err)
3582                                                mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
3583                                                         slave, srqn);
3586
3587                                        atomic_dec(&srq->mtt->ref_count);
3588                                        if (srq->cq)
3589                                                atomic_dec(&srq->cq->ref_count);
3590                                        state = RES_SRQ_ALLOCATED;
3591                                        break;
3592
3593                                default:
3594                                        state = 0;
3595                                }
3596                        }
3597                }
3598                spin_lock_irq(mlx4_tlock(dev));
3599        }
3600        spin_unlock_irq(mlx4_tlock(dev));
3601}
3602
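/*
 * A CQ is only reclaimed once its ref_count has dropped to zero; the
 * QPs and SRQs that referenced it are torn down first by the caller.
 */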
3603static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3604{
3605        struct mlx4_priv *priv = mlx4_priv(dev);
3606        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3607        struct list_head *cq_list =
3608                &tracker->slave_list[slave].res_list[RES_CQ];
3609        struct res_cq *cq;
3610        struct res_cq *tmp;
3611        int state;
3612        u64 in_param;
3614        int cqn;
3615        int err;
3616
3617        err = move_all_busy(dev, slave, RES_CQ);
3618        if (err)
3619                mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
3620                          slave);
3621
3622        spin_lock_irq(mlx4_tlock(dev));
3623        list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3624                spin_unlock_irq(mlx4_tlock(dev));
3625                if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3626                        cqn = cq->com.res_id;
3627                        state = cq->com.from_state;
3628                        while (state != 0) {
3629                                switch (state) {
3630                                case RES_CQ_ALLOCATED:
3631                                        __mlx4_cq_free_icm(dev, cqn);
3632                                        spin_lock_irq(mlx4_tlock(dev));
3633                                        rb_erase(&cq->com.node,
3634                                                 &tracker->res_tree[RES_CQ]);
3635                                        list_del(&cq->com.list);
3636                                        spin_unlock_irq(mlx4_tlock(dev));
3637                                        kfree(cq);
3638                                        state = 0;
3639                                        break;
3640
3641                                case RES_CQ_HW:
3642                                        in_param = slave;
3643                                        err = mlx4_cmd(dev, in_param, cqn, 1,
3644                                                       MLX4_CMD_HW2SW_CQ,
3645                                                       MLX4_CMD_TIME_CLASS_A,
3646                                                       MLX4_CMD_NATIVE);
3647                                        if (err)
3648                                                mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
3649                                                         slave, cqn);
3652                                        atomic_dec(&cq->mtt->ref_count);
3653                                        state = RES_CQ_ALLOCATED;
3654                                        break;
3655
3656                                default:
3657                                        state = 0;
3658                                }
3659                        }
3660                }
3661                spin_lock_irq(mlx4_tlock(dev));
3662        }
3663        spin_unlock_irq(mlx4_tlock(dev));
3664}
3665
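/*
 * Unwind each memory region: HW2SW_MPT out of hardware ownership, free
 * the ICM backing, then release the key and the tracker entry.
 */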
3666static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3667{
3668        struct mlx4_priv *priv = mlx4_priv(dev);
3669        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3670        struct list_head *mpt_list =
3671                &tracker->slave_list[slave].res_list[RES_MPT];
3672        struct res_mpt *mpt;
3673        struct res_mpt *tmp;
3674        int state;
3675        u64 in_param;
3677        int mptn;
3678        int err;
3679
3680        err = move_all_busy(dev, slave, RES_MPT);
3681        if (err)
3682                mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
3683                          slave);
3684
3685        spin_lock_irq(mlx4_tlock(dev));
3686        list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3687                spin_unlock_irq(mlx4_tlock(dev));
3688                if (mpt->com.owner == slave) {
3689                        mptn = mpt->com.res_id;
3690                        state = mpt->com.from_state;
3691                        while (state != 0) {
3692                                switch (state) {
3693                                case RES_MPT_RESERVED:
3694                                        __mlx4_mpt_release(dev, mpt->key);
3695                                        spin_lock_irq(mlx4_tlock(dev));
3696                                        rb_erase(&mpt->com.node,
3697                                                 &tracker->res_tree[RES_MPT]);
3698                                        list_del(&mpt->com.list);
3699                                        spin_unlock_irq(mlx4_tlock(dev));
3700                                        kfree(mpt);
3701                                        state = 0;
3702                                        break;
3703
3704                                case RES_MPT_MAPPED:
3705                                        __mlx4_mpt_free_icm(dev, mpt->key);
3706                                        state = RES_MPT_RESERVED;
3707                                        break;
3708
3709                                case RES_MPT_HW:
3710                                        in_param = slave;
3711                                        err = mlx4_cmd(dev, in_param, mptn, 0,
3712                                                     MLX4_CMD_HW2SW_MPT,
3713                                                     MLX4_CMD_TIME_CLASS_A,
3714                                                     MLX4_CMD_NATIVE);
3715                                        if (err)
3716                                                mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
3717                                                         slave, mptn);
3720                                        if (mpt->mtt)
3721                                                atomic_dec(&mpt->mtt->ref_count);
3722                                        state = RES_MPT_MAPPED;
3723                                        break;
3724                                default:
3725                                        state = 0;
3726                                }
3727                        }
3728                }
3729                spin_lock_irq(mlx4_tlock(dev));
3730        }
3731        spin_unlock_irq(mlx4_tlock(dev));
3732}
3733
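/*
 * MTT ranges are reclaimed after the QPs, CQs, SRQs, MRs and EQs that
 * referenced them, so at this point they can simply be freed back to
 * the allocator.
 */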
3734static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3735{
3736        struct mlx4_priv *priv = mlx4_priv(dev);
3737        struct mlx4_resource_tracker *tracker =
3738                &priv->mfunc.master.res_tracker;
3739        struct list_head *mtt_list =
3740                &tracker->slave_list[slave].res_list[RES_MTT];
3741        struct res_mtt *mtt;
3742        struct res_mtt *tmp;
3743        int state;
3745        int base;
3746        int err;
3747
3748        err = move_all_busy(dev, slave, RES_MTT);
3749        if (err)
3750                mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
3751                          slave);
3752
3753        spin_lock_irq(mlx4_tlock(dev));
3754        list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3755                spin_unlock_irq(mlx4_tlock(dev));
3756                if (mtt->com.owner == slave) {
3757                        base = mtt->com.res_id;
3758                        state = mtt->com.from_state;
3759                        while (state != 0) {
3760                                switch (state) {
3761                                case RES_MTT_ALLOCATED:
3762                                        __mlx4_free_mtt_range(dev, base,
3763                                                              mtt->order);
3764                                        spin_lock_irq(mlx4_tlock(dev));
3765                                        rb_erase(&mtt->com.node,
3766                                                 &tracker->res_tree[RES_MTT]);
3767                                        list_del(&mtt->com.list);
3768                                        spin_unlock_irq(mlx4_tlock(dev));
3769                                        kfree(mtt);
3770                                        state = 0;
3771                                        break;
3772
3773                                default:
3774                                        state = 0;
3775                                }
3776                        }
3777                }
3778                spin_lock_irq(mlx4_tlock(dev));
3779        }
3780        spin_unlock_irq(mlx4_tlock(dev));
3781}
3782
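/*
 * Detach from firmware, and drop from the tracker, every flow-steering
 * rule the slave still has attached.
 */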
3783static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3784{
3785        struct mlx4_priv *priv = mlx4_priv(dev);
3786        struct mlx4_resource_tracker *tracker =
3787                &priv->mfunc.master.res_tracker;
3788        struct list_head *fs_rule_list =
3789                &tracker->slave_list[slave].res_list[RES_FS_RULE];
3790        struct res_fs_rule *fs_rule;
3791        struct res_fs_rule *tmp;
3792        int state;
3793        u64 base;
3794        int err;
3795
3796        err = move_all_busy(dev, slave, RES_FS_RULE);
3797        if (err)
3798                mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3799                          slave);
3800
3801        spin_lock_irq(mlx4_tlock(dev));
3802        list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3803                spin_unlock_irq(mlx4_tlock(dev));
3804                if (fs_rule->com.owner == slave) {
3805                        base = fs_rule->com.res_id;
3806                        state = fs_rule->com.from_state;
3807                        while (state != 0) {
3808                                switch (state) {
3809                                case RES_FS_RULE_ALLOCATED:
3810                                        /* detach rule */
3811                                        err = mlx4_cmd(dev, base, 0, 0,
3812                                                       MLX4_QP_FLOW_STEERING_DETACH,
3813                                                       MLX4_CMD_TIME_CLASS_A,
3814                                                       MLX4_CMD_NATIVE);
3815
3816                                        spin_lock_irq(mlx4_tlock(dev));
3817                                        rb_erase(&fs_rule->com.node,
3818                                                 &tracker->res_tree[RES_FS_RULE]);
3819                                        list_del(&fs_rule->com.list);
3820                                        spin_unlock_irq(mlx4_tlock(dev));
3821                                        kfree(fs_rule);
3822                                        state = 0;
3823                                        break;
3824
3825                                default:
3826                                        state = 0;
3827                                }
3828                        }
3829                }
3830                spin_lock_irq(mlx4_tlock(dev));
3831        }
3832        spin_unlock_irq(mlx4_tlock(dev));
3833}
3834
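/*
 * Move each of the slave's EQs back to SW ownership with HW2SW_EQ,
 * drop the EQ's MTT reference, then release the tracker entry.
 */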
3835static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3836{
3837        struct mlx4_priv *priv = mlx4_priv(dev);
3838        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3839        struct list_head *eq_list =
3840                &tracker->slave_list[slave].res_list[RES_EQ];
3841        struct res_eq *eq;
3842        struct res_eq *tmp;
3843        int err;
3844        int state;
3846        int eqn;
3847        struct mlx4_cmd_mailbox *mailbox;
3848
3849        err = move_all_busy(dev, slave, RES_EQ);
3850        if (err)
3851                mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
3852                          slave);
3853
3854        spin_lock_irq(mlx4_tlock(dev));
3855        list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3856                spin_unlock_irq(mlx4_tlock(dev));
3857                if (eq->com.owner == slave) {
3858                        eqn = eq->com.res_id;
3859                        state = eq->com.from_state;
3860                        while (state != 0) {
3861                                switch (state) {
3862                                case RES_EQ_RESERVED:
3863                                        spin_lock_irq(mlx4_tlock(dev));
3864                                        rb_erase(&eq->com.node,
3865                                                 &tracker->res_tree[RES_EQ]);
3866                                        list_del(&eq->com.list);
3867                                        spin_unlock_irq(mlx4_tlock(dev));
3868                                        kfree(eq);
3869                                        state = 0;
3870                                        break;
3871
3872                                case RES_EQ_HW:
3873                                        mailbox = mlx4_alloc_cmd_mailbox(dev);
3874                                        if (IS_ERR(mailbox)) {
3875                                                cond_resched();
3876                                                continue;
3877                                        }
3878                                        err = mlx4_cmd_box(dev, slave, 0,
3879                                                           eqn & 0xff, 0,
3880                                                           MLX4_CMD_HW2SW_EQ,
3881                                                           MLX4_CMD_TIME_CLASS_A,
3882                                                           MLX4_CMD_NATIVE);
3883                                        if (err)
3884                                                mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
3885                                                         slave, eqn);
3887                                        mlx4_free_cmd_mailbox(dev, mailbox);
3888                                        atomic_dec(&eq->mtt->ref_count);
3889                                        state = RES_EQ_RESERVED;
3890                                        break;
3891
3892                                default:
3893                                        state = 0;
3894                                }
3895                        }
3896                }
3897                spin_lock_irq(mlx4_tlock(dev));
3898        }
3899        spin_unlock_irq(mlx4_tlock(dev));
3900}
3901
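/*
 * Counters carry no hardware state to unwind; drop the tracker entries
 * and return the indices to the allocator.
 */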
3902static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3903{
3904        struct mlx4_priv *priv = mlx4_priv(dev);
3905        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3906        struct list_head *counter_list =
3907                &tracker->slave_list[slave].res_list[RES_COUNTER];
3908        struct res_counter *counter;
3909        struct res_counter *tmp;
3910        int err;
3911        int index;
3912
3913        err = move_all_busy(dev, slave, RES_COUNTER);
3914        if (err)
3915                mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
3916                          slave);
3917
3918        spin_lock_irq(mlx4_tlock(dev));
3919        list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3920                if (counter->com.owner == slave) {
3921                        index = counter->com.res_id;
3922                        rb_erase(&counter->com.node,
3923                                 &tracker->res_tree[RES_COUNTER]);
3924                        list_del(&counter->com.list);
3925                        kfree(counter);
3926                        __mlx4_counter_free(dev, index);
3927                }
3928        }
3929        spin_unlock_irq(mlx4_tlock(dev));
3930}
3931
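/*
 * XRC domains, like counters, only need their tracker entries dropped
 * and their numbers returned to the allocator.
 */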
3932static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3933{
3934        struct mlx4_priv *priv = mlx4_priv(dev);
3935        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3936        struct list_head *xrcdn_list =
3937                &tracker->slave_list[slave].res_list[RES_XRCD];
3938        struct res_xrcdn *xrcd;
3939        struct res_xrcdn *tmp;
3940        int err;
3941        int xrcdn;
3942
3943        err = move_all_busy(dev, slave, RES_XRCD);
3944        if (err)
3945                mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
3946                          slave);
3947
3948        spin_lock_irq(mlx4_tlock(dev));
3949        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3950                if (xrcd->com.owner == slave) {
3951                        xrcdn = xrcd->com.res_id;
3952                        rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3953                        list_del(&xrcd->com.list);
3954                        kfree(xrcd);
3955                        __mlx4_xrcd_free(dev, xrcdn);
3956                }
3957        }
3958        spin_unlock_irq(mlx4_tlock(dev));
3959}
3960
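/*
 * Reclaim everything a slave owns, e.g. when its VF is removed or the
 * slave function crashes.  The order matters: steering rules and QPs
 * are destroyed first so the CQs, SRQs, MPTs and MTTs they reference
 * can be freed afterwards.
 */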
3961void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3962{
3963        struct mlx4_priv *priv = mlx4_priv(dev);
3964
3965        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3966        /* VLAN */
3967        rem_slave_macs(dev, slave);
3968        rem_slave_fs_rule(dev, slave);
3969        rem_slave_qps(dev, slave);
3970        rem_slave_srqs(dev, slave);
3971        rem_slave_cqs(dev, slave);
3972        rem_slave_mrs(dev, slave);
3973        rem_slave_eqs(dev, slave);
3974        rem_slave_mtts(dev, slave);
3975        rem_slave_counters(dev, slave);
3976        rem_slave_xrcdns(dev, slave);
3977        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3978}
3979
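/*
 * Deferred work that pushes an administratively set VLAN/QoS change
 * into all active QPs of a VF via the UPDATE_QP command, so the new
 * setting takes effect without the VF having to reopen its queues.
 */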
3980void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
3981{
3982        struct mlx4_vf_immed_vlan_work *work =
3983                container_of(_work, struct mlx4_vf_immed_vlan_work, work);
3984        struct mlx4_cmd_mailbox *mailbox;
3985        struct mlx4_update_qp_context *upd_context;
3986        struct mlx4_dev *dev = &work->priv->dev;
3987        struct mlx4_resource_tracker *tracker =
3988                &work->priv->mfunc.master.res_tracker;
3989        struct list_head *qp_list =
3990                &tracker->slave_list[work->slave].res_list[RES_QP];
3991        struct res_qp *qp;
3992        struct res_qp *tmp;
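        /* primary-path fields this UPDATE_QP invocation is allowed to modify */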
3993        u64 qp_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
3994                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
3995                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
3996                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
3997                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
3998                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED) |
3999                       (1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4000                       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4002        int err;
4003        int port, errors = 0;
4004        u8 vlan_control;
4005
4006        if (mlx4_is_slave(dev)) {
4007                mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4008                          work->slave);
4009                goto out;
4010        }
4011
4012        mailbox = mlx4_alloc_cmd_mailbox(dev);
4013        if (IS_ERR(mailbox))
4014                goto out;
4015        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4016                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4017                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4018                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4019                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4020                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4021                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
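        /* no VLAN enforced: block tagged frames in both directions */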
4022        else if (!work->vlan_id)
4023                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4024                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
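        /* VST: block guest-tagged TX and untagged/prio-tagged RX */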
4025        else
4026                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4027                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4028                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4029
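        /* the mailbox is prepared once and reused for every QP; only the
         * sched_queue byte is re-patched per QP in the loop below
         */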
4030        upd_context = mailbox->buf;
4031        upd_context->primary_addr_path_mask = cpu_to_be64(qp_mask);
4032        upd_context->qp_context.pri_path.vlan_control = vlan_control;
4033        upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4034
4035        spin_lock_irq(mlx4_tlock(dev));
4036        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4037                spin_unlock_irq(mlx4_tlock(dev));
4038                if (qp->com.owner == work->slave) {
4039                        if (qp->com.from_state != RES_QP_HW ||
4040                            !qp->sched_queue ||  /* no INIT2RTR trans yet */
4041                            mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4042                            qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4043                                spin_lock_irq(mlx4_tlock(dev));
4044                                continue;
4045                        }
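                        /* bit 6 of sched_queue encodes the (0-based) port */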
4046                        port = (qp->sched_queue >> 6 & 1) + 1;
4047                        if (port != work->port) {
4048                                spin_lock_irq(mlx4_tlock(dev));
4049                                continue;
4050                        }
4051                        upd_context->qp_context.pri_path.sched_queue =
4052                                qp->sched_queue & 0xC7;
4053                        upd_context->qp_context.pri_path.sched_queue |=
4054                                ((work->qos & 0x7) << 3);
4055
4056                        err = mlx4_cmd(dev, mailbox->dma,
4057                                       qp->local_qpn & 0xffffff,
4058                                       0, MLX4_CMD_UPDATE_QP,
4059                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4060                        if (err) {
4061                                mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4062                                          work->slave, port, qp->local_qpn, err);
4065                                errors++;
4066                        }
4067                }
4068                spin_lock_irq(mlx4_tlock(dev));
4069        }
4070        spin_unlock_irq(mlx4_tlock(dev));
4071        mlx4_free_cmd_mailbox(dev, mailbox);
4072
4073        if (errors)
4074                mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4075                         errors, work->slave, work->port);
4076
4077        /* unregister previous vlan_id if needed and we had no errors
4078         * while updating the QPs
4079         */
4080        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4081            NO_INDX != work->orig_vlan_ix)
4082                __mlx4_unregister_vlan(&work->priv->dev, work->port,
4083                                       work->orig_vlan_ix);
4084out:
4085        kfree(work);
4087}
4088