linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

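/*
 * Overview: the resource tracker lets the PF ("master") account for
 * every HW resource (QP, CQ, SRQ, MPT, MTT, MAC, VLAN, EQ, counter,
 * XRC domain, flow steering rule) owned by each function ("slave"),
 * so it can validate wrapped FW commands and reclaim everything when
 * a slave dies.  Per type, entries live in an rb-tree keyed by
 * resource id plus a per-slave list, and allocations are charged
 * against the quotas enforced in mlx4_grant_resource() below.
 */
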
#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

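/*
 * Note: the rb-trees themselves carry no locking; every caller of
 * res_tracker_lookup()/res_tracker_insert() must already hold the
 * tracker lock (mlx4_tlock()).  Entries are keyed by res_common.res_id.
 */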
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put the new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug use only */
static const char *ResourceType(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
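
/*
 * Each slave has a hard "quota" and a smaller "guaranteed" share per
 * resource type.  Requests beyond the guarantee come out of the shared
 * free pool, but never below the sum of the guarantees still owed to
 * the other functions (res_reserved / res_port_rsvd).  For example,
 * with free = 10 and reserved = 8, a slave already past its guarantee
 * can still get 1 more instance (10 - 1 > 8) but not 3 (10 - 3 < 8).
 */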
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;

        if (slave > dev->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave])
                goto out;

        if (allocated + count <= guaranteed) {
                err = 0;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                if (free - from_free > reserved)
                        err = 0;
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                    enum mlx4_resource res_type, int count,
                                    int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];

        if (slave > dev->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);
        if (port > 0) {
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
        }

        spin_unlock(&res_alloc->alloc_lock);
}

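/*
 * Default split: every function is guaranteed
 * num_instances / (2 * (num_vfs + 1)) instances and is capped at
 * num_instances / 2 plus its guarantee.  E.g. with 1000 QPs and
 * 3 VFs, each of the 4 functions gets guaranteed = 125, quota = 625.
 */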
static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
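
/*
 * Layout of the "allocated" counters: for per-port resources (MAC,
 * VLAN) the array holds MLX4_MAX_PORTS blocks of (num_vfs + 1)
 * entries, indexed as (port - 1) * (num_vfs + 1) + slave; for global
 * resources it is a single block indexed by slave.
 */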
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->num_vfs + 1) * sizeof(int),
                                                       GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->num_vfs + 1; t++) {
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] = 0;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->res_free = res_alloc->quota[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < MLX4_MAX_PORTS; j++)
                                        res_alloc->res_port_rsvd[j] +=
                                                res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

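/*
 * update_pkey_index() patches the pkey index inside a wrapped QP
 * modify mailbox in place.  The raw offsets below assume the standard
 * mailbox layout: the QP context starts 8 bytes into the buffer, so
 * buf + 64 lands on pri_path.sched_queue (bit 6 encodes the port) and
 * buf + 35 on pri_path.pkey_index.
 */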
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

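/*
 * GID paravirtualization: a UD QP gets the slave's proxy GID by
 * setting bit 7 of mgid_index; for RC/UC QPs the index is remapped to
 * the slave's own entry, but only when the corresponding address path
 * is actually being modified (hence the optpar checks).
 */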
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

        if (MLX4_QP_ST_UD == ts)
                qp_ctx->pri_path.mgid_index = 0x80 | slave;

        if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
                        qp_ctx->pri_path.mgid_index = slave & 0x7F;
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
                        qp_ctx->alt_path.mgid_index = slave & 0x7F;
        }
}

static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
                if (MLX4_QP_ST_RC == qp_type ||
                    (MLX4_QP_ST_UD == qp_type &&
                     !mlx4_is_qp_reserved(dev, qpn)))
                        return -EINVAL;

                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd */
                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
        return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

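/*
 * get_res()/put_res() form a simple busy-marking protocol: while the
 * master handles a command on a slave's behalf the resource is parked
 * in RES_ANY_BUSY, so concurrent commands touching the same id fail
 * fast with -EBUSY instead of racing; put_res() restores the state
 * saved in from_state.
 */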
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

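/*
 * add_res_range() registers ids [base, base + count) in one shot: the
 * tracker entries are allocated outside the lock, then linked into the
 * rb-tree and the owner's per-type list under mlx4_tlock(); any
 * collision unwinds the partial insertion before returning.
 */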
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* i indexes res_arr; unwind everything inserted so far and
         * unlink the entries from the slave's list before they are freed
         */
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
                       __func__, __LINE__,
                       mtt_states_str(res->com.state),
                       atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

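/*
 * The *_res_start_move_to() helpers validate a state transition and
 * park the resource in the per-type BUSY state, recording from_state
 * and to_state.  The caller then issues the FW command and finishes
 * with res_end_move() on success or res_abort_move() on failure.
 */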
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_CQ_BUSY:
                        err = -EBUSY;
                        break;

                case RES_CQ_ALLOCATED:
                        if (r->com.state != RES_CQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        else
                                err = 0;
                        break;

                case RES_CQ_HW:
                        if (r->com.state != RES_CQ_ALLOCATED)
                                err = -EINVAL;
                        else
                                err = 0;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_CQ_BUSY;
                        if (cq)
                                *cq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_SRQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_SRQ_ALLOCATED:
                        if (r->com.state != RES_SRQ_HW)
                                err = -EINVAL;
                        else if (atomic_read(&r->ref_count))
                                err = -EBUSY;
                        break;

                case RES_SRQ_HW:
                        if (r->com.state != RES_SRQ_ALLOCATED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_SRQ_BUSY;
                        if (srq)
                                *srq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}

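/*
 * qp_alloc_res() implements the two slave-visible allocation phases:
 * RES_OP_RESERVE charges quota, reserves a qpn range and registers it
 * (rolling back on any failure); RES_OP_MAP_ICM then moves one qpn to
 * RES_QP_MAPPED, backing it with ICM unless the qpn is FW-reserved.
 */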
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param);
                align = get_param_h(&in_param);
                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
                if (err)
                        return err;

                err = __mlx4_qp_reserve_range(dev, count, align, &base);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        return err;
                }

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        order = get_param_l(&in_param);

        err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
        if (err)
                return err;

        base = __mlx4_alloc_mtt_range(dev, order);
        if (base == -1) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                return -ENOMEM;
        }

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
        if (err) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                __mlx4_free_mtt_range(dev, base, order);
        } else {
                set_param_l(out_param, base);
        }

        return err;
}

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int index;
        int id;
        struct res_mpt *mpt;

        switch (op) {
        case RES_OP_RESERVE:
                err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
                if (err)
                        break;

                index = __mlx4_mpt_reserve(dev);
                if (index == -1) {
                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                        break;
                }
                id = index & mpt_mask(dev);

                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
                        __mlx4_mpt_release(dev, index);
                        break;
                }
                set_param_l(out_param, index);
                break;
        case RES_OP_MAP_ICM:
                index = get_param_l(&in_param);
                id = index & mpt_mask(dev);
                err = mr_res_start_move_to(dev, slave, id,
                                           RES_MPT_MAPPED, &mpt);
                if (err)
                        return err;

                err = __mlx4_mpt_alloc_icm(dev, mpt->key);
                if (err) {
                        res_abort_move(dev, slave, RES_MPT, id);
                        return err;
                }

                res_end_move(dev, slave, RES_MPT, id);
                break;
        }
        return err;
}

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int cqn;
        int err;

        switch (op) {
        case RES_OP_RESERVE_AND_MAP:
                err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
                if (err)
                        break;
1631
1632                err = __mlx4_cq_alloc_icm(dev, &cqn);
1633                if (err) {
1634                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1635                        break;
1636                }
1637
1638                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1639                if (err) {
1640                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1641                        __mlx4_cq_free_icm(dev, cqn);
1642                        break;
1643                }
1644
1645                set_param_l(out_param, cqn);
1646                break;
1647
1648        default:
1649                err = -EINVAL;
1650        }
1651
1652        return err;
1653}
1654
1655static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1656                         u64 in_param, u64 *out_param)
1657{
1658        int srqn;
1659        int err;
1660
1661        switch (op) {
1662        case RES_OP_RESERVE_AND_MAP:
1663                err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1664                if (err)
1665                        break;
1666
1667                err = __mlx4_srq_alloc_icm(dev, &srqn);
1668                if (err) {
1669                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1670                        break;
1671                }
1672
1673                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1674                if (err) {
1675                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1676                        __mlx4_srq_free_icm(dev, srqn);
1677                        break;
1678                }
1679
1680                set_param_l(out_param, srqn);
1681                break;
1682
1683        default:
1684                err = -EINVAL;
1685        }
1686
1687        return err;
1688}
1689
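    /* Record a registered MAC in the slave's resource list so quotas can
     * be enforced and stale addresses freed when the slave shuts down. */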
1690static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1691{
1692        struct mlx4_priv *priv = mlx4_priv(dev);
1693        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1694        struct mac_res *res;
1695
1696        if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1697                return -EINVAL;
1698        res = kzalloc(sizeof(*res), GFP_KERNEL);
1699        if (!res) {
1700                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1701                return -ENOMEM;
1702        }
1703        res->mac = mac;
1704        res->port = (u8) port;
1705        list_add_tail(&res->list,
1706                      &tracker->slave_list[slave].res_list[RES_MAC]);
1707        return 0;
1708}
1709
1710static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1711                               int port)
1712{
1713        struct mlx4_priv *priv = mlx4_priv(dev);
1714        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1715        struct list_head *mac_list =
1716                &tracker->slave_list[slave].res_list[RES_MAC];
1717        struct mac_res *res, *tmp;
1718
1719        list_for_each_entry_safe(res, tmp, mac_list, list) {
1720                if (res->mac == mac && res->port == (u8) port) {
1721                        list_del(&res->list);
1722                        mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1723                        kfree(res);
1724                        break;
1725                }
1726        }
1727}
1728
1729static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1730{
1731        struct mlx4_priv *priv = mlx4_priv(dev);
1732        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1733        struct list_head *mac_list =
1734                &tracker->slave_list[slave].res_list[RES_MAC];
1735        struct mac_res *res, *tmp;
1736
1737        list_for_each_entry_safe(res, tmp, mac_list, list) {
1738                list_del(&res->list);
1739                __mlx4_unregister_mac(dev, res->port, res->mac);
1740                mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1741                kfree(res);
1742        }
1743}
1744
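    /* __mlx4_register_mac() returns the MAC table index on success, so any
     * non-negative return value is folded into out_param and err reset to 0. */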
1745static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1746                         u64 in_param, u64 *out_param, int in_port)
1747{
1748        int err = -EINVAL;
1749        int port;
1750        u64 mac;
1751
1752        if (op != RES_OP_RESERVE_AND_MAP)
1753                return err;
1754
1755        port = !in_port ? get_param_l(out_param) : in_port;
1756        mac = in_param;
1757
1758        err = __mlx4_register_mac(dev, port, mac);
1759        if (err >= 0) {
1760                set_param_l(out_param, err);
1761                err = 0;
1762        }
1763
1764        if (!err) {
1765                err = mac_add_to_slave(dev, slave, mac, port);
1766                if (err)
1767                        __mlx4_unregister_mac(dev, port, mac);
1768        }
1769        return err;
1770}
1771
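    /* VLAN entries are reference counted per (vlan, port) pair; registering
     * an already tracked VLAN only bumps its ref_count. */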
1772static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1773                             int port, int vlan_index)
1774{
1775        struct mlx4_priv *priv = mlx4_priv(dev);
1776        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1777        struct list_head *vlan_list =
1778                &tracker->slave_list[slave].res_list[RES_VLAN];
1779        struct vlan_res *res, *tmp;
1780
1781        list_for_each_entry_safe(res, tmp, vlan_list, list) {
1782                if (res->vlan == vlan && res->port == (u8) port) {
1783                        /* vlan found. update ref count */
1784                        ++res->ref_count;
1785                        return 0;
1786                }
1787        }
1788
1789        if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1790                return -EINVAL;
1791        res = kzalloc(sizeof(*res), GFP_KERNEL);
1792        if (!res) {
1793                mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1794                return -ENOMEM;
1795        }
1796        res->vlan = vlan;
1797        res->port = (u8) port;
1798        res->vlan_index = vlan_index;
1799        res->ref_count = 1;
1800        list_add_tail(&res->list,
1801                      &tracker->slave_list[slave].res_list[RES_VLAN]);
1802        return 0;
1803}
1804
1806static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1807                                int port)
1808{
1809        struct mlx4_priv *priv = mlx4_priv(dev);
1810        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1811        struct list_head *vlan_list =
1812                &tracker->slave_list[slave].res_list[RES_VLAN];
1813        struct vlan_res *res, *tmp;
1814
1815        list_for_each_entry_safe(res, tmp, vlan_list, list) {
1816                if (res->vlan == vlan && res->port == (u8) port) {
1817                        if (!--res->ref_count) {
1818                                list_del(&res->list);
1819                                mlx4_release_resource(dev, slave, RES_VLAN,
1820                                                      1, port);
1821                                kfree(res);
1822                        }
1823                        break;
1824                }
1825        }
1826}
1827
1828static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1829{
1830        struct mlx4_priv *priv = mlx4_priv(dev);
1831        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1832        struct list_head *vlan_list =
1833                &tracker->slave_list[slave].res_list[RES_VLAN];
1834        struct vlan_res *res, *tmp;
1835        int i;
1836
1837        list_for_each_entry_safe(res, tmp, vlan_list, list) {
1838                list_del(&res->list);
1839                /* dereference the vlan as many times as the slave referenced it */
1840                for (i = 0; i < res->ref_count; i++)
1841                        __mlx4_unregister_vlan(dev, res->port, res->vlan);
1842                mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1843                kfree(res);
1844        }
1845}
1846
1847static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1848                          u64 in_param, u64 *out_param, int in_port)
1849{
1850        struct mlx4_priv *priv = mlx4_priv(dev);
1851        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1852        int err;
1853        u16 vlan;
1854        int vlan_index;
1855        int port;
1856
1857        port = !in_port ? get_param_l(out_param) : in_port;
1858
1859        if (!port || op != RES_OP_RESERVE_AND_MAP)
1860                return -EINVAL;
1861
1862        /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1863        if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1864                slave_state[slave].old_vlan_api = true;
1865                return 0;
1866        }
1867
1868        vlan = (u16) in_param;
1869
1870        err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1871        if (!err) {
1872                set_param_l(out_param, (u32) vlan_index);
1873                err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1874                if (err)
1875                        __mlx4_unregister_vlan(dev, port, vlan);
1876        }
1877        return err;
1878}
1879
1880static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1881                             u64 in_param, u64 *out_param)
1882{
1883        u32 index;
1884        int err;
1885
1886        if (op != RES_OP_RESERVE)
1887                return -EINVAL;
1888
1889        err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
1890        if (err)
1891                return err;
1892
1893        err = __mlx4_counter_alloc(dev, &index);
1894        if (err) {
1895                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1896                return err;
1897        }
1898
1899        err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1900        if (err) {
1901                __mlx4_counter_free(dev, index);
1902                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1903        } else {
1904                set_param_l(out_param, index);
1905        }
1906
1907        return err;
1908}
1909
1910static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1911                           u64 in_param, u64 *out_param)
1912{
1913        u32 xrcdn;
1914        int err;
1915
1916        if (op != RES_OP_RESERVE)
1917                return -EINVAL;
1918
1919        err = __mlx4_xrcd_alloc(dev, &xrcdn);
1920        if (err)
1921                return err;
1922
1923        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
1924        if (err)
1925                __mlx4_xrcd_free(dev, xrcdn);
1926        else
1927                set_param_l(out_param, xrcdn);
1928
1929        return err;
1930}
1931
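    /* Dispatch a slave's ALLOC_RES command: the low byte of the in_modifier
     * selects the resource type, bits 8-15 carry the port for MAC/VLAN. */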
1932int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
1933                           struct mlx4_vhcr *vhcr,
1934                           struct mlx4_cmd_mailbox *inbox,
1935                           struct mlx4_cmd_mailbox *outbox,
1936                           struct mlx4_cmd_info *cmd)
1937{
1938        int err;
1939        int alop = vhcr->op_modifier;
1940
1941        switch (vhcr->in_modifier & 0xFF) {
1942        case RES_QP:
1943                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
1944                                   vhcr->in_param, &vhcr->out_param);
1945                break;
1946
1947        case RES_MTT:
1948                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1949                                    vhcr->in_param, &vhcr->out_param);
1950                break;
1951
1952        case RES_MPT:
1953                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
1954                                    vhcr->in_param, &vhcr->out_param);
1955                break;
1956
1957        case RES_CQ:
1958                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1959                                   vhcr->in_param, &vhcr->out_param);
1960                break;
1961
1962        case RES_SRQ:
1963                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
1964                                    vhcr->in_param, &vhcr->out_param);
1965                break;
1966
1967        case RES_MAC:
1968                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
1969                                    vhcr->in_param, &vhcr->out_param,
1970                                    (vhcr->in_modifier >> 8) & 0xFF);
1971                break;
1972
1973        case RES_VLAN:
1974                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
1975                                     vhcr->in_param, &vhcr->out_param,
1976                                     (vhcr->in_modifier >> 8) & 0xFF);
1977                break;
1978
1979        case RES_COUNTER:
1980                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
1981                                        vhcr->in_param, &vhcr->out_param);
1982                break;
1983
1984        case RES_XRCD:
1985                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
1986                                      vhcr->in_param, &vhcr->out_param);
1987                break;
1988
1989        default:
1990                err = -EINVAL;
1991                break;
1992        }
1993
1994        return err;
1995}
1996
1997static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1998                       u64 in_param)
1999{
2000        int err;
2001        int count;
2002        int base;
2003        int qpn;
2004
2005        switch (op) {
2006        case RES_OP_RESERVE:
2007                base = get_param_l(&in_param) & 0x7fffff;
2008                count = get_param_h(&in_param);
2009                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2010                if (err)
2011                        break;
2012                mlx4_release_resource(dev, slave, RES_QP, count, 0);
2013                __mlx4_qp_release_range(dev, base, count);
2014                break;
2015        case RES_OP_MAP_ICM:
2016                qpn = get_param_l(&in_param) & 0x7fffff;
2017                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2018                                           NULL, 0);
2019                if (err)
2020                        return err;
2021
2022                if (!fw_reserved(dev, qpn))
2023                        __mlx4_qp_free_icm(dev, qpn);
2024
2025                res_end_move(dev, slave, RES_QP, qpn);
2026
2027                if (valid_reserved(dev, slave, qpn))
2028                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2029                break;
2030        default:
2031                err = -EINVAL;
2032                break;
2033        }
2034        return err;
2035}
2036
2037static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2038                        u64 in_param, u64 *out_param)
2039{
2040        int err = -EINVAL;
2041        int base;
2042        int order;
2043
2044        if (op != RES_OP_RESERVE_AND_MAP)
2045                return err;
2046
2047        base = get_param_l(&in_param);
2048        order = get_param_h(&in_param);
2049        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2050        if (!err) {
2051                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2052                __mlx4_free_mtt_range(dev, base, order);
2053        }
2054        return err;
2055}
2056
2057static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2058                        u64 in_param)
2059{
2060        int err = -EINVAL;
2061        int index;
2062        int id;
2063        struct res_mpt *mpt;
2064
2065        switch (op) {
2066        case RES_OP_RESERVE:
2067                index = get_param_l(&in_param);
2068                id = index & mpt_mask(dev);
2069                err = get_res(dev, slave, id, RES_MPT, &mpt);
2070                if (err)
2071                        break;
2072                index = mpt->key;
2073                put_res(dev, slave, id, RES_MPT);
2074
2075                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2076                if (err)
2077                        break;
2078                mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2079                __mlx4_mpt_release(dev, index);
2080                break;
2081        case RES_OP_MAP_ICM:
2082                index = get_param_l(&in_param);
2083                id = index & mpt_mask(dev);
2084                err = mr_res_start_move_to(dev, slave, id,
2085                                           RES_MPT_RESERVED, &mpt);
2086                if (err)
2087                        return err;
2088
2089                __mlx4_mpt_free_icm(dev, mpt->key);
2090                res_end_move(dev, slave, RES_MPT, id);
2091                break;
2093        default:
2094                err = -EINVAL;
2095                break;
2096        }
2097        return err;
2098}
2099
2100static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2101                       u64 in_param, u64 *out_param)
2102{
2103        int cqn;
2104        int err;
2105
2106        switch (op) {
2107        case RES_OP_RESERVE_AND_MAP:
2108                cqn = get_param_l(&in_param);
2109                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2110                if (err)
2111                        break;
2112
2113                mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2114                __mlx4_cq_free_icm(dev, cqn);
2115                break;
2116
2117        default:
2118                err = -EINVAL;
2119                break;
2120        }
2121
2122        return err;
2123}
2124
2125static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2126                        u64 in_param, u64 *out_param)
2127{
2128        int srqn;
2129        int err;
2130
2131        switch (op) {
2132        case RES_OP_RESERVE_AND_MAP:
2133                srqn = get_param_l(&in_param);
2134                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2135                if (err)
2136                        break;
2137
2138                mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2139                __mlx4_srq_free_icm(dev, srqn);
2140                break;
2141
2142        default:
2143                err = -EINVAL;
2144                break;
2145        }
2146
2147        return err;
2148}
2149
2150static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2151                            u64 in_param, u64 *out_param, int in_port)
2152{
2153        int port;
2154        int err = 0;
2155
2156        switch (op) {
2157        case RES_OP_RESERVE_AND_MAP:
2158                port = !in_port ? get_param_l(out_param) : in_port;
2159                mac_del_from_slave(dev, slave, in_param, port);
2160                __mlx4_unregister_mac(dev, port, in_param);
2161                break;
2162        default:
2163                err = -EINVAL;
2164                break;
2165        }
2166
2167        return err;
2169}
2170
2171static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2172                            u64 in_param, u64 *out_param, int port)
2173{
2174        struct mlx4_priv *priv = mlx4_priv(dev);
2175        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2176        int err = 0;
2177
2178        switch (op) {
2179        case RES_OP_RESERVE_AND_MAP:
2180                if (slave_state[slave].old_vlan_api)
2181                        return 0;
2182                if (!port)
2183                        return -EINVAL;
2184                vlan_del_from_slave(dev, slave, in_param, port);
2185                __mlx4_unregister_vlan(dev, port, in_param);
2186                break;
2187        default:
2188                err = -EINVAL;
2189                break;
2190        }
2191
2192        return err;
2193}
2194
2195static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2196                            u64 in_param, u64 *out_param)
2197{
2198        int index;
2199        int err;
2200
2201        if (op != RES_OP_RESERVE)
2202                return -EINVAL;
2203
2204        index = get_param_l(&in_param);
2205        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2206        if (err)
2207                return err;
2208
2209        __mlx4_counter_free(dev, index);
2210        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2211
2212        return err;
2213}
2214
2215static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2216                          u64 in_param, u64 *out_param)
2217{
2218        int xrcdn;
2219        int err;
2220
2221        if (op != RES_OP_RESERVE)
2222                return -EINVAL;
2223
2224        xrcdn = get_param_l(&in_param);
2225        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2226        if (err)
2227                return err;
2228
2229        __mlx4_xrcd_free(dev, xrcdn);
2230
2231        return err;
2232}
2233
2234int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2235                          struct mlx4_vhcr *vhcr,
2236                          struct mlx4_cmd_mailbox *inbox,
2237                          struct mlx4_cmd_mailbox *outbox,
2238                          struct mlx4_cmd_info *cmd)
2239{
2240        int err = -EINVAL;
2241        int alop = vhcr->op_modifier;
2242
2243        switch (vhcr->in_modifier & 0xFF) {
2244        case RES_QP:
2245                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2246                                  vhcr->in_param);
2247                break;
2248
2249        case RES_MTT:
2250                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2251                                   vhcr->in_param, &vhcr->out_param);
2252                break;
2253
2254        case RES_MPT:
2255                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2256                                   vhcr->in_param);
2257                break;
2258
2259        case RES_CQ:
2260                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2261                                  vhcr->in_param, &vhcr->out_param);
2262                break;
2263
2264        case RES_SRQ:
2265                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2266                                   vhcr->in_param, &vhcr->out_param);
2267                break;
2268
2269        case RES_MAC:
2270                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2271                                   vhcr->in_param, &vhcr->out_param,
2272                                   (vhcr->in_modifier >> 8) & 0xFF);
2273                break;
2274
2275        case RES_VLAN:
2276                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2277                                    vhcr->in_param, &vhcr->out_param,
2278                                    (vhcr->in_modifier >> 8) & 0xFF);
2279                break;
2280
2281        case RES_COUNTER:
2282                err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2283                                       vhcr->in_param, &vhcr->out_param);
2284                break;
2285
2286        case RES_XRCD:
2287                err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2288                                     vhcr->in_param, &vhcr->out_param);
2289                break;

2290        default:
2291                break;
2292        }
2293        return err;
2294}
2295
2296/* ugly but other choices are uglier */
2297static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2298{
2299        return (be32_to_cpu(mpt->flags) >> 9) & 1;
2300}
2301
2302static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2303{
2304        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2305}
2306
2307static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2308{
2309        return be32_to_cpu(mpt->mtt_sz);
2310}
2311
2312static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2313{
2314        return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2315}
2316
2317static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2318{
2319        return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2320}
2321
2322static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2323{
2324        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2325}
2326
2327static int mr_is_region(struct mlx4_mpt_entry *mpt)
2328{
2329        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2330}
2331
2332static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2333{
2334        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2335}
2336
2337static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2338{
2339        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2340}
2341
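    /* Number of MTT entries spanned by a QP's buffers.  Queue sizes and
     * strides are log2 values over a 16-byte basic unit, so each work queue
     * occupies 1 << (log_size + log_stride + 4) bytes; SRQ, RSS and XRC QPs
     * own no receive queue, so only the send queue counts for them. */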
2342static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2343{
2344        int page_shift = (qpc->log_page_size & 0x3f) + 12;
2345        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2346        int log_sq_stride = qpc->sq_size_stride & 7;
2347        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2348        int log_rq_stride = qpc->rq_size_stride & 7;
2349        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2350        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2351        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2352        int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2353        int sq_size;
2354        int rq_size;
2355        int total_pages;
2356        int total_mem;
2357        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2358
2359        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2360        rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2361        total_mem = sq_size + rq_size;
2362        total_pages =
2363                roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2364                                   page_shift);
2365
2366        return total_pages;
2367}
2368
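    /* The window [start, start + size) must fall entirely within the MTT
     * allocation of 2^order entries beginning at res_id. */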
2369static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2370                           int size, struct res_mtt *mtt)
2371{
2372        int res_start = mtt->com.res_id;
2373        int res_size = (1 << mtt->order);
2374
2375        if (start < res_start || start + size > res_start + res_size)
2376                return -EPERM;
2377        return 0;
2378}
2379
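    /* SW2HW_MPT for a slave: the MPT must be a memory region (no memory
     * windows, no FMR bind), the slave id embedded in the PD must be zero
     * or match this slave, and a non-physical MR must point at an MTT
     * range the slave owns. */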
2380int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2381                           struct mlx4_vhcr *vhcr,
2382                           struct mlx4_cmd_mailbox *inbox,
2383                           struct mlx4_cmd_mailbox *outbox,
2384                           struct mlx4_cmd_info *cmd)
2385{
2386        int err;
2387        int index = vhcr->in_modifier;
2388        struct res_mtt *mtt;
2389        struct res_mpt *mpt;
2390        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2391        int phys;
2392        int id;
2393        u32 pd;
2394        int pd_slave;
2395
2396        id = index & mpt_mask(dev);
2397        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2398        if (err)
2399                return err;
2400
2401        /* Disable memory windows for VFs. */
2402        if (!mr_is_region(inbox->buf)) {
2403                err = -EPERM;
2404                goto ex_abort;
2405        }
2406
2407        /* The slave id embedded in the PD must be zero or match this slave. */
2408        pd = mr_get_pd(inbox->buf);
2409        pd_slave = (pd >> 17) & 0x7f;
2410        if (pd_slave != 0 && pd_slave != slave) {
2411                err = -EPERM;
2412                goto ex_abort;
2413        }
2414
2415        if (mr_is_fmr(inbox->buf)) {
2416                /* FMR and Bind Enable are forbidden in slave devices. */
2417                if (mr_is_bind_enabled(inbox->buf)) {
2418                        err = -EPERM;
2419                        goto ex_abort;
2420                }
2421                /* FMR and Memory Windows are also forbidden. */
2422                if (!mr_is_region(inbox->buf)) {
2423                        err = -EPERM;
2424                        goto ex_abort;
2425                }
2426        }
2427
2428        phys = mr_phys_mpt(inbox->buf);
2429        if (!phys) {
2430                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2431                if (err)
2432                        goto ex_abort;
2433
2434                err = check_mtt_range(dev, slave, mtt_base,
2435                                      mr_get_mtt_size(inbox->buf), mtt);
2436                if (err)
2437                        goto ex_put;
2438
2439                mpt->mtt = mtt;
2440        }
2441
2442        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2443        if (err)
2444                goto ex_put;
2445
2446        if (!phys) {
2447                atomic_inc(&mtt->ref_count);
2448                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2449        }
2450
2451        res_end_move(dev, slave, RES_MPT, id);
2452        return 0;
2453
2454ex_put:
2455        if (!phys)
2456                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2457ex_abort:
2458        res_abort_move(dev, slave, RES_MPT, id);
2459
2460        return err;
2461}
2462
2463int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2464                           struct mlx4_vhcr *vhcr,
2465                           struct mlx4_cmd_mailbox *inbox,
2466                           struct mlx4_cmd_mailbox *outbox,
2467                           struct mlx4_cmd_info *cmd)
2468{
2469        int err;
2470        int index = vhcr->in_modifier;
2471        struct res_mpt *mpt;
2472        int id;
2473
2474        id = index & mpt_mask(dev);
2475        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2476        if (err)
2477                return err;
2478
2479        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2480        if (err)
2481                goto ex_abort;
2482
2483        if (mpt->mtt)
2484                atomic_dec(&mpt->mtt->ref_count);
2485
2486        res_end_move(dev, slave, RES_MPT, id);
2487        return 0;
2488
2489ex_abort:
2490        res_abort_move(dev, slave, RES_MPT, id);
2491
2492        return err;
2493}
2494
2495int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2496                           struct mlx4_vhcr *vhcr,
2497                           struct mlx4_cmd_mailbox *inbox,
2498                           struct mlx4_cmd_mailbox *outbox,
2499                           struct mlx4_cmd_info *cmd)
2500{
2501        int err;
2502        int index = vhcr->in_modifier;
2503        struct res_mpt *mpt;
2504        int id;
2505
2506        id = index & mpt_mask(dev);
2507        err = get_res(dev, slave, id, RES_MPT, &mpt);
2508        if (err)
2509                return err;
2510
2511        if (mpt->com.from_state != RES_MPT_HW) {
2512                err = -EBUSY;
2513                goto out;
2514        }
2515
2516        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2517
2518out:
2519        put_res(dev, slave, id, RES_MPT);
2520        return err;
2521}
2522
2523static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2524{
2525        return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2526}
2527
2528static int qp_get_scqn(struct mlx4_qp_context *qpc)
2529{
2530        return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2531}
2532
2533static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2534{
2535        return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2536}
2537
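    /* Proxy and tunnel QPs use paravirtualized qkeys; patch the real qkey
     * into the context before it reaches the firmware. */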
2538static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2539                                  struct mlx4_qp_context *context)
2540{
2541        u32 qpn = vhcr->in_modifier & 0xffffff;
2542        u32 qkey = 0;
2543
2544        if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2545                return;
2546
2547        /* adjust qkey in qp context */
2548        context->qkey = cpu_to_be32(qkey);
2549}
2550
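    /* RST2INIT places the QP in hw ownership.  Every object its context
     * references (MTT range, receive/send CQ, optional SRQ) is looked up,
     * range checked where relevant, and reference counted so it cannot be
     * destroyed while the QP is using it. */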
2551int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2552                             struct mlx4_vhcr *vhcr,
2553                             struct mlx4_cmd_mailbox *inbox,
2554                             struct mlx4_cmd_mailbox *outbox,
2555                             struct mlx4_cmd_info *cmd)
2556{
2557        int err;
2558        int qpn = vhcr->in_modifier & 0x7fffff;
2559        struct res_mtt *mtt;
2560        struct res_qp *qp;
2561        struct mlx4_qp_context *qpc = inbox->buf + 8;
2562        int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2563        int mtt_size = qp_get_mtt_size(qpc);
2564        struct res_cq *rcq;
2565        struct res_cq *scq;
2566        int rcqn = qp_get_rcqn(qpc);
2567        int scqn = qp_get_scqn(qpc);
2568        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2569        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2570        struct res_srq *srq;
2571        int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2572
2573        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2574        if (err)
2575                return err;
2576        qp->local_qpn = local_qpn;
2577        qp->sched_queue = 0;
2578        qp->param3 = 0;
2579        qp->vlan_control = 0;
2580        qp->fvl_rx = 0;
2581        qp->pri_path_fl = 0;
2582        qp->vlan_index = 0;
2583        qp->feup = 0;
2584        qp->qpc_flags = be32_to_cpu(qpc->flags);
2585
2586        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2587        if (err)
2588                goto ex_abort;
2589
2590        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2591        if (err)
2592                goto ex_put_mtt;
2593
2594        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2595        if (err)
2596                goto ex_put_mtt;
2597
2598        if (scqn != rcqn) {
2599                err = get_res(dev, slave, scqn, RES_CQ, &scq);
2600                if (err)
2601                        goto ex_put_rcq;
2602        } else {
2603                scq = rcq;
            }
2604
2605        if (use_srq) {
2606                err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2607                if (err)
2608                        goto ex_put_scq;
2609        }
2610
2611        adjust_proxy_tun_qkey(dev, vhcr, qpc);
2612        update_pkey_index(dev, slave, inbox);
2613        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2614        if (err)
2615                goto ex_put_srq;
2616        atomic_inc(&mtt->ref_count);
2617        qp->mtt = mtt;
2618        atomic_inc(&rcq->ref_count);
2619        qp->rcq = rcq;
2620        atomic_inc(&scq->ref_count);
2621        qp->scq = scq;
2622
2623        if (scqn != rcqn)
2624                put_res(dev, slave, scqn, RES_CQ);
2625
2626        if (use_srq) {
2627                atomic_inc(&srq->ref_count);
2628                put_res(dev, slave, srqn, RES_SRQ);
2629                qp->srq = srq;
2630        }
2631        put_res(dev, slave, rcqn, RES_CQ);
2632        put_res(dev, slave, mtt_base, RES_MTT);
2633        res_end_move(dev, slave, RES_QP, qpn);
2634
2635        return 0;
2636
2637ex_put_srq:
2638        if (use_srq)
2639                put_res(dev, slave, srqn, RES_SRQ);
2640ex_put_scq:
2641        if (scqn != rcqn)
2642                put_res(dev, slave, scqn, RES_CQ);
2643ex_put_rcq:
2644        put_res(dev, slave, rcqn, RES_CQ);
2645ex_put_mtt:
2646        put_res(dev, slave, mtt_base, RES_MTT);
2647ex_abort:
2648        res_abort_move(dev, slave, RES_QP, qpn);
2649
2650        return err;
2651}
2652
2653static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2654{
2655        return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2656}
2657
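    /* EQEs are 32 bytes, so log_eq_size + 5 is the log of the EQ buffer
     * size in bytes; an EQ always occupies at least one page. */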
2658static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2659{
2660        int log_eq_size = eqc->log_eq_size & 0x1f;
2661        int page_shift = (eqc->log_page_size & 0x3f) + 12;
2662
2663        if (log_eq_size + 5 < page_shift)
2664                return 1;
2665
2666        return 1 << (log_eq_size + 5 - page_shift);
2667}
2668
2669static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2670{
2671        return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2672}
2673
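    /* Same calculation as for EQs: CQEs are likewise 32 bytes. */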
2674static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2675{
2676        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2677        int page_shift = (cqc->log_page_size & 0x3f) + 12;
2678
2679        if (log_cq_size + 5 < page_shift)
2680                return 1;
2681
2682        return 1 << (log_cq_size + 5 - page_shift);
2683}
2684
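    /* EQ numbers are only unique per function, so tracked EQs carry a
     * compound id: (slave << 8) | eqn. */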
2685int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2686                          struct mlx4_vhcr *vhcr,
2687                          struct mlx4_cmd_mailbox *inbox,
2688                          struct mlx4_cmd_mailbox *outbox,
2689                          struct mlx4_cmd_info *cmd)
2690{
2691        int err;
2692        int eqn = vhcr->in_modifier;
2693        int res_id = (slave << 8) | eqn;
2694        struct mlx4_eq_context *eqc = inbox->buf;
2695        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2696        int mtt_size = eq_get_mtt_size(eqc);
2697        struct res_eq *eq;
2698        struct res_mtt *mtt;
2699
2700        err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2701        if (err)
2702                return err;
2703        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2704        if (err)
2705                goto out_add;
2706
2707        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2708        if (err)
2709                goto out_move;
2710
2711        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2712        if (err)
2713                goto out_put;
2714
2715        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2716        if (err)
2717                goto out_put;
2718
2719        atomic_inc(&mtt->ref_count);
2720        eq->mtt = mtt;
2721        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2722        res_end_move(dev, slave, RES_EQ, res_id);
2723        return 0;
2724
2725out_put:
2726        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2727out_move:
2728        res_abort_move(dev, slave, RES_EQ, res_id);
2729out_add:
2730        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2731        return err;
2732}
2733
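    /* Find the slave-owned MTT allocation containing the given range and
     * mark it busy under the tracker lock; the caller drops the busy state
     * with put_res(). */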
2734static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2735                              int len, struct res_mtt **res)
2736{
2737        struct mlx4_priv *priv = mlx4_priv(dev);
2738        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2739        struct res_mtt *mtt;
2740        int err = -EINVAL;
2741
2742        spin_lock_irq(mlx4_tlock(dev));
2743        list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2744                            com.list) {
2745                if (!check_mtt_range(dev, slave, start, len, mtt)) {
2746                        *res = mtt;
2747                        mtt->com.from_state = mtt->com.state;
2748                        mtt->com.state = RES_MTT_BUSY;
2749                        err = 0;
2750                        break;
2751                }
2752        }
2753        spin_unlock_irq(mlx4_tlock(dev));
2754
2755        return err;
2756}
2757
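    /* For RC/UC QPs, on the INIT2RTR/RTR2RTS/RTS2RTS/SQD2SQD/SQD2RTS
     * transitions, slaves may only use gid index 0 in the primary and
     * alternate paths. */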
2758static int verify_qp_parameters(struct mlx4_dev *dev,
2759                                struct mlx4_cmd_mailbox *inbox,
2760                                enum qp_transition transition, u8 slave)
2761{
2762        u32                     qp_type;
2763        struct mlx4_qp_context  *qp_ctx;
2764        enum mlx4_qp_optpar     optpar;
2765
2766        qp_ctx  = inbox->buf + 8;
2767        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2768        optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2769
2770        switch (qp_type) {
2771        case MLX4_QP_ST_RC:
2772        case MLX4_QP_ST_UC:
2773                switch (transition) {
2774                case QP_TRANS_INIT2RTR:
2775                case QP_TRANS_RTR2RTS:
2776                case QP_TRANS_RTS2RTS:
2777                case QP_TRANS_SQD2SQD:
2778                case QP_TRANS_SQD2RTS:
2779                        if (slave != mlx4_master_func_num(dev)) {
2780                                /* slaves have only gid index 0 */
2781                                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
2782                                        if (qp_ctx->pri_path.mgid_index)
2783                                                return -EINVAL;
2784                                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
2785                                        if (qp_ctx->alt_path.mgid_index)
2786                                                return -EINVAL;
                            }
2787                        break;
2788                default:
2789                        break;
2790                }
2791
2792                break;
2793        default:
2794                break;
2795        }
2796
2797        return 0;
2798}
2799
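    /* WRITE_MTT for a slave: verify the target range lies within an MTT
     * allocation the slave owns, then do the write on the host side after
     * converting the slave's big-endian page list in place. */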
2800int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2801                           struct mlx4_vhcr *vhcr,
2802                           struct mlx4_cmd_mailbox *inbox,
2803                           struct mlx4_cmd_mailbox *outbox,
2804                           struct mlx4_cmd_info *cmd)
2805{
2806        struct mlx4_mtt mtt;
2807        __be64 *page_list = inbox->buf;
2808        u64 *pg_list = (u64 *)page_list;
2809        int i;
2810        struct res_mtt *rmtt = NULL;
2811        int start = be64_to_cpu(page_list[0]);
2812        int npages = vhcr->in_modifier;
2813        int err;
2814
2815        err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2816        if (err)
2817                return err;
2818
2819        /* Call the SW implementation of write_mtt:
2820         * - Prepare a dummy mtt struct
2821         * - Translate inbox contents to simple addresses in host endianness */
2822        mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2823                            we don't really use it */
2824        mtt.order = 0;
2825        mtt.page_shift = 0;
2826        for (i = 0; i < npages; ++i)
2827                pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2828
2829        err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2830                               ((u64 *)page_list + 2));
2831
2832        if (rmtt)
2833                put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2834
2835        return err;
2836}
2837
2838int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2839                          struct mlx4_vhcr *vhcr,
2840                          struct mlx4_cmd_mailbox *inbox,
2841                          struct mlx4_cmd_mailbox *outbox,
2842                          struct mlx4_cmd_info *cmd)
2843{
2844        int eqn = vhcr->in_modifier;
2845        int res_id = eqn | (slave << 8);
2846        struct res_eq *eq;
2847        int err;
2848
2849        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2850        if (err)
2851                return err;
2852
2853        err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2854        if (err)
2855                goto ex_abort;
2856
2857        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2858        if (err)
2859                goto ex_put;
2860
2861        atomic_dec(&eq->mtt->ref_count);
2862        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2863        res_end_move(dev, slave, RES_EQ, res_id);
2864        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2865
2866        return 0;
2867
2868ex_put:
2869        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2870ex_abort:
2871        res_abort_move(dev, slave, RES_EQ, res_id);
2872
2873        return err;
2874}
2875
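    /* Inject an event on a slave's event queue.  The slave must have
     * registered an EQ for this event type and that EQ must be in hw
     * state; the EQE is copied into a mailbox and posted with the
     * GEN_EQE firmware command. */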
2876int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2877{
2878        struct mlx4_priv *priv = mlx4_priv(dev);
2879        struct mlx4_slave_event_eq_info *event_eq;
2880        struct mlx4_cmd_mailbox *mailbox;
2881        u32 in_modifier = 0;
2882        int err;
2883        int res_id;
2884        struct res_eq *req;
2885
2886        if (!priv->mfunc.master.slave_state)
2887                return -EINVAL;
2888
2889        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2890
2891        /* Create the event only if the slave is registered */
2892        if (event_eq->eqn < 0)
2893                return 0;
2894
2895        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2896        res_id = (slave << 8) | event_eq->eqn;
2897        err = get_res(dev, slave, res_id, RES_EQ, &req);
2898        if (err)
2899                goto unlock;
2900
2901        if (req->com.from_state != RES_EQ_HW) {
2902                err = -EINVAL;
2903                goto put;
2904        }
2905
2906        mailbox = mlx4_alloc_cmd_mailbox(dev);
2907        if (IS_ERR(mailbox)) {
2908                err = PTR_ERR(mailbox);
2909                goto put;
2910        }
2911
2912        if (eqe->type == MLX4_EVENT_TYPE_CMD) {
2913                ++event_eq->token;
2914                eqe->event.cmd.token = cpu_to_be16(event_eq->token);
2915        }
2916
2917        memcpy(mailbox->buf, (u8 *) eqe, 28);
2918
2919        in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
2920
2921        err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
2922                       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
2923                       MLX4_CMD_NATIVE);
2924
2925        put_res(dev, slave, res_id, RES_EQ);
2926        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2927        mlx4_free_cmd_mailbox(dev, mailbox);
2928        return err;
2929
2930put:
2931        put_res(dev, slave, res_id, RES_EQ);
2932
2933unlock:
2934        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
2935        return err;
2936}
2937
2938int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
2939                          struct mlx4_vhcr *vhcr,
2940                          struct mlx4_cmd_mailbox *inbox,
2941                          struct mlx4_cmd_mailbox *outbox,
2942                          struct mlx4_cmd_info *cmd)
2943{
2944        int eqn = vhcr->in_modifier;
2945        int res_id = eqn | (slave << 8);
2946        struct res_eq *eq;
2947        int err;
2948
2949        err = get_res(dev, slave, res_id, RES_EQ, &eq);
2950        if (err)
2951                return err;
2952
2953        if (eq->com.from_state != RES_EQ_HW) {
2954                err = -EINVAL;
2955                goto ex_put;
2956        }
2957
2958        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2959
2960ex_put:
2961        put_res(dev, slave, res_id, RES_EQ);
2962        return err;
2963}
2964
2965int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
2966                          struct mlx4_vhcr *vhcr,
2967                          struct mlx4_cmd_mailbox *inbox,
2968                          struct mlx4_cmd_mailbox *outbox,
2969                          struct mlx4_cmd_info *cmd)
2970{
2971        int err;
2972        int cqn = vhcr->in_modifier;
2973        struct mlx4_cq_context *cqc = inbox->buf;
2974        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
2975        struct res_cq *cq;
2976        struct res_mtt *mtt;
2977
2978        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
2979        if (err)
2980                return err;
2981        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2982        if (err)
2983                goto out_move;
2984        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
2985        if (err)
2986                goto out_put;
2987        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2988        if (err)
2989                goto out_put;
2990        atomic_inc(&mtt->ref_count);
2991        cq->mtt = mtt;
2992        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2993        res_end_move(dev, slave, RES_CQ, cqn);
2994        return 0;
2995
2996out_put:
2997        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2998out_move:
2999        res_abort_move(dev, slave, RES_CQ, cqn);
3000        return err;
3001}
3002
3003int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3004                          struct mlx4_vhcr *vhcr,
3005                          struct mlx4_cmd_mailbox *inbox,
3006                          struct mlx4_cmd_mailbox *outbox,
3007                          struct mlx4_cmd_info *cmd)
3008{
3009        int err;
3010        int cqn = vhcr->in_modifier;
3011        struct res_cq *cq;
3012
3013        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3014        if (err)
3015                return err;
3016        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3017        if (err)
3018                goto out_move;
3019        atomic_dec(&cq->mtt->ref_count);
3020        res_end_move(dev, slave, RES_CQ, cqn);
3021        return 0;
3022
3023out_move:
3024        res_abort_move(dev, slave, RES_CQ, cqn);
3025        return err;
3026}
3027
3028int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3029                          struct mlx4_vhcr *vhcr,
3030                          struct mlx4_cmd_mailbox *inbox,
3031                          struct mlx4_cmd_mailbox *outbox,
3032                          struct mlx4_cmd_info *cmd)
3033{
3034        int cqn = vhcr->in_modifier;
3035        struct res_cq *cq;
3036        int err;
3037
3038        err = get_res(dev, slave, cqn, RES_CQ, &cq);
3039        if (err)
3040                return err;
3041
3042        if (cq->com.from_state != RES_CQ_HW) {
                    err = -EBUSY;
3043                goto ex_put;
            }
3044
3045        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3046ex_put:
3047        put_res(dev, slave, cqn, RES_CQ);
3048
3049        return err;
3050}
3051
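    /* CQ resize: validate the new MTT range, run the command, then move
     * the CQ's reference from the old MTT allocation to the new one. */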
3052static int handle_resize(struct mlx4_dev *dev, int slave,
3053                         struct mlx4_vhcr *vhcr,
3054                         struct mlx4_cmd_mailbox *inbox,
3055                         struct mlx4_cmd_mailbox *outbox,
3056                         struct mlx4_cmd_info *cmd,
3057                         struct res_cq *cq)
3058{
3059        int err;
3060        struct res_mtt *orig_mtt;
3061        struct res_mtt *mtt;
3062        struct mlx4_cq_context *cqc = inbox->buf;
3063        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3064
3065        err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3066        if (err)
3067                return err;
3068
3069        if (orig_mtt != cq->mtt) {
3070                err = -EINVAL;
3071                goto ex_put;
3072        }
3073
3074        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3075        if (err)
3076                goto ex_put;
3077
3078        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3079        if (err)
3080                goto ex_put1;
3081        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3082        if (err)
3083                goto ex_put1;
3084        atomic_dec(&orig_mtt->ref_count);
3085        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3086        atomic_inc(&mtt->ref_count);
3087        cq->mtt = mtt;
3088        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3089        return 0;
3090
3091ex_put1:
3092        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3093ex_put:
3094        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3095
3096        return err;
3097
3098}
3099
3100int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3101                           struct mlx4_vhcr *vhcr,
3102                           struct mlx4_cmd_mailbox *inbox,
3103                           struct mlx4_cmd_mailbox *outbox,
3104                           struct mlx4_cmd_info *cmd)
3105{
3106        int cqn = vhcr->in_modifier;
3107        struct res_cq *cq;
3108        int err;
3109
3110        err = get_res(dev, slave, cqn, RES_CQ, &cq);
3111        if (err)
3112                return err;
3113
3114        if (cq->com.from_state != RES_CQ_HW) {
                    err = -EBUSY;
3115                goto ex_put;
            }
3116
3117        if (vhcr->op_modifier == 0) {
3118                err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3119                goto ex_put;
3120        }
3121
3122        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3123ex_put:
3124        put_res(dev, slave, cqn, RES_CQ);
3125
3126        return err;
3127}
3128
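    /* An SRQ buffer holds 2^log_srq_size WQEs of 2^(log_rq_stride + 4)
     * bytes each, rounded up to at least one page of MTT entries. */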
3129static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3130{
3131        int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3132        int log_rq_stride = srqc->logstride & 7;
3133        int page_shift = (srqc->log_page_size & 0x3f) + 12;
3134
3135        if (log_srq_size + log_rq_stride + 4 < page_shift)
3136                return 1;
3137
3138        return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3139}
3140
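/*
 * SW2HW_SRQ hands an SRQ over to firmware.  The wrapper brackets the
 * command with the tracker's move protocol: srq_res_start_move_to() marks
 * the SRQ busy with RES_SRQ_HW as the target state, res_end_move() commits
 * the transition on success, and res_abort_move() rolls back on any
 * failure.  The MTT backing the SRQ is pinned across the command and its
 * reference count bumped so it cannot be freed while the SRQ uses it.
 */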
3141int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3142                           struct mlx4_vhcr *vhcr,
3143                           struct mlx4_cmd_mailbox *inbox,
3144                           struct mlx4_cmd_mailbox *outbox,
3145                           struct mlx4_cmd_info *cmd)
3146{
3147        int err;
3148        int srqn = vhcr->in_modifier;
3149        struct res_mtt *mtt;
3150        struct res_srq *srq;
3151        struct mlx4_srq_context *srqc = inbox->buf;
3152        int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3153
3154        if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3155                return -EINVAL;
3156
3157        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3158        if (err)
3159                return err;
3160        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3161        if (err)
3162                goto ex_abort;
3163        err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3164                              mtt);
3165        if (err)
3166                goto ex_put_mtt;
3167
3168        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3169        if (err)
3170                goto ex_put_mtt;
3171
3172        atomic_inc(&mtt->ref_count);
3173        srq->mtt = mtt;
3174        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3175        res_end_move(dev, slave, RES_SRQ, srqn);
3176        return 0;
3177
3178ex_put_mtt:
3179        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3180ex_abort:
3181        res_abort_move(dev, slave, RES_SRQ, srqn);
3182
3183        return err;
3184}
3185
3186int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3187                           struct mlx4_vhcr *vhcr,
3188                           struct mlx4_cmd_mailbox *inbox,
3189                           struct mlx4_cmd_mailbox *outbox,
3190                           struct mlx4_cmd_info *cmd)
3191{
3192        int err;
3193        int srqn = vhcr->in_modifier;
3194        struct res_srq *srq;
3195
3196        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3197        if (err)
3198                return err;
3199        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3200        if (err)
3201                goto ex_abort;
3202        atomic_dec(&srq->mtt->ref_count);
3203        if (srq->cq)
3204                atomic_dec(&srq->cq->ref_count);
3205        res_end_move(dev, slave, RES_SRQ, srqn);
3206
3207        return 0;
3208
3209ex_abort:
3210        res_abort_move(dev, slave, RES_SRQ, srqn);
3211
3212        return err;
3213}
3214
3215int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3216                           struct mlx4_vhcr *vhcr,
3217                           struct mlx4_cmd_mailbox *inbox,
3218                           struct mlx4_cmd_mailbox *outbox,
3219                           struct mlx4_cmd_info *cmd)
3220{
3221        int err;
3222        int srqn = vhcr->in_modifier;
3223        struct res_srq *srq;
3224
3225        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3226        if (err)
3227                return err;
3228        if (srq->com.from_state != RES_SRQ_HW) {
3229                err = -EBUSY;
3230                goto out;
3231        }
3232        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3233out:
3234        put_res(dev, slave, srqn, RES_SRQ);
3235        return err;
3236}
3237
3238int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3239                         struct mlx4_vhcr *vhcr,
3240                         struct mlx4_cmd_mailbox *inbox,
3241                         struct mlx4_cmd_mailbox *outbox,
3242                         struct mlx4_cmd_info *cmd)
3243{
3244        int err;
3245        int srqn = vhcr->in_modifier;
3246        struct res_srq *srq;
3247
3248        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3249        if (err)
3250                return err;
3251
3252        if (srq->com.from_state != RES_SRQ_HW) {
3253                err = -EBUSY;
3254                goto out;
3255        }
3256
3257        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3258out:
3259        put_res(dev, slave, srqn, RES_SRQ);
3260        return err;
3261}
3262
3263int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3264                        struct mlx4_vhcr *vhcr,
3265                        struct mlx4_cmd_mailbox *inbox,
3266                        struct mlx4_cmd_mailbox *outbox,
3267                        struct mlx4_cmd_info *cmd)
3268{
3269        int err;
3270        int qpn = vhcr->in_modifier & 0x7fffff;
3271        struct res_qp *qp;
3272
3273        err = get_res(dev, slave, qpn, RES_QP, &qp);
3274        if (err)
3275                return err;
3276        if (qp->com.from_state != RES_QP_HW) {
3277                err = -EBUSY;
3278                goto out;
3279        }
3280
3281        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3282out:
3283        put_res(dev, slave, qpn, RES_QP);
3284        return err;
3285}
3286
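/*
 * For the QP transition commands the mailbox begins with an 8-byte header
 * (the optparam mask plus reserved space), so the QP context itself sits
 * at inbox->buf + 8.  The wrappers below paravirtualize that context in
 * place, fixing up the pkey index, the GID index and the proxy/tunnel
 * qkey for the slave, before passing the command through to firmware.
 */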
3287int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3288                              struct mlx4_vhcr *vhcr,
3289                              struct mlx4_cmd_mailbox *inbox,
3290                              struct mlx4_cmd_mailbox *outbox,
3291                              struct mlx4_cmd_info *cmd)
3292{
3293        struct mlx4_qp_context *context = inbox->buf + 8;
3294        adjust_proxy_tun_qkey(dev, vhcr, context);
3295        update_pkey_index(dev, slave, inbox);
3296        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3297}
3298
3299int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3300                             struct mlx4_vhcr *vhcr,
3301                             struct mlx4_cmd_mailbox *inbox,
3302                             struct mlx4_cmd_mailbox *outbox,
3303                             struct mlx4_cmd_info *cmd)
3304{
3305        int err;
3306        struct mlx4_qp_context *qpc = inbox->buf + 8;
3307        int qpn = vhcr->in_modifier & 0x7fffff;
3308        struct res_qp *qp;
3309        u8 orig_sched_queue;
3310        __be32  orig_param3 = qpc->param3;
3311        u8 orig_vlan_control = qpc->pri_path.vlan_control;
3312        u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3313        u8 orig_pri_path_fl = qpc->pri_path.fl;
3314        u8 orig_vlan_index = qpc->pri_path.vlan_index;
3315        u8 orig_feup = qpc->pri_path.feup;
3316
3317        err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3318        if (err)
3319                return err;
3320
3321        update_pkey_index(dev, slave, inbox);
3322        update_gid(dev, inbox, (u8)slave);
3323        adjust_proxy_tun_qkey(dev, vhcr, qpc);
3324        orig_sched_queue = qpc->pri_path.sched_queue;
3325        err = update_vport_qp_param(dev, inbox, slave, qpn);
3326        if (err)
3327                return err;
3328
3329        err = get_res(dev, slave, qpn, RES_QP, &qp);
3330        if (err)
3331                return err;
3332        if (qp->com.from_state != RES_QP_HW) {
3333                err = -EBUSY;
3334                goto out;
3335        }
3336
3337        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3338out:
        /* If there was no error, save the sched queue value passed in by
         * the VF.  This is essentially the QoS value it asked for, and is
         * useful if we later allow dynamic changes from VST back to VGT.
         */
3343        if (!err) {
3344                qp->sched_queue = orig_sched_queue;
3345                qp->param3      = orig_param3;
3346                qp->vlan_control = orig_vlan_control;
3347                qp->fvl_rx      =  orig_fvl_rx;
3348                qp->pri_path_fl = orig_pri_path_fl;
3349                qp->vlan_index  = orig_vlan_index;
3350                qp->feup        = orig_feup;
3351        }
3352        put_res(dev, slave, qpn, RES_QP);
3353        return err;
3354}
3355
3356int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3357                            struct mlx4_vhcr *vhcr,
3358                            struct mlx4_cmd_mailbox *inbox,
3359                            struct mlx4_cmd_mailbox *outbox,
3360                            struct mlx4_cmd_info *cmd)
3361{
3362        int err;
3363        struct mlx4_qp_context *context = inbox->buf + 8;
3364
3365        err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3366        if (err)
3367                return err;
3368
3369        update_pkey_index(dev, slave, inbox);
3370        update_gid(dev, inbox, (u8)slave);
3371        adjust_proxy_tun_qkey(dev, vhcr, context);
3372        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3373}
3374
3375int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3376                            struct mlx4_vhcr *vhcr,
3377                            struct mlx4_cmd_mailbox *inbox,
3378                            struct mlx4_cmd_mailbox *outbox,
3379                            struct mlx4_cmd_info *cmd)
3380{
3381        int err;
3382        struct mlx4_qp_context *context = inbox->buf + 8;
3383
3384        err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3385        if (err)
3386                return err;
3387
3388        update_pkey_index(dev, slave, inbox);
3389        update_gid(dev, inbox, (u8)slave);
3390        adjust_proxy_tun_qkey(dev, vhcr, context);
3391        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3392}
3393
3395int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3396                              struct mlx4_vhcr *vhcr,
3397                              struct mlx4_cmd_mailbox *inbox,
3398                              struct mlx4_cmd_mailbox *outbox,
3399                              struct mlx4_cmd_info *cmd)
3400{
3401        struct mlx4_qp_context *context = inbox->buf + 8;
3402        adjust_proxy_tun_qkey(dev, vhcr, context);
3403        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3404}
3405
3406int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3407                            struct mlx4_vhcr *vhcr,
3408                            struct mlx4_cmd_mailbox *inbox,
3409                            struct mlx4_cmd_mailbox *outbox,
3410                            struct mlx4_cmd_info *cmd)
3411{
3412        int err;
3413        struct mlx4_qp_context *context = inbox->buf + 8;
3414
3415        err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3416        if (err)
3417                return err;
3418
3419        adjust_proxy_tun_qkey(dev, vhcr, context);
3420        update_gid(dev, inbox, (u8)slave);
3421        update_pkey_index(dev, slave, inbox);
3422        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3423}
3424
3425int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3426                            struct mlx4_vhcr *vhcr,
3427                            struct mlx4_cmd_mailbox *inbox,
3428                            struct mlx4_cmd_mailbox *outbox,
3429                            struct mlx4_cmd_info *cmd)
3430{
3431        int err;
3432        struct mlx4_qp_context *context = inbox->buf + 8;
3433
3434        err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3435        if (err)
3436                return err;
3437
3438        adjust_proxy_tun_qkey(dev, vhcr, context);
3439        update_gid(dev, inbox, (u8)slave);
3440        update_pkey_index(dev, slave, inbox);
3441        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3442}
3443
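/*
 * Moving a QP to RESET undoes the reference counting done when it was
 * brought up: once firmware accepts the transition, the QP drops its
 * holds on the MTT, on both CQs and, if present, on the SRQ, and the
 * tracker entry falls back to RES_QP_MAPPED.
 */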
3444int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3445                         struct mlx4_vhcr *vhcr,
3446                         struct mlx4_cmd_mailbox *inbox,
3447                         struct mlx4_cmd_mailbox *outbox,
3448                         struct mlx4_cmd_info *cmd)
3449{
3450        int err;
3451        int qpn = vhcr->in_modifier & 0x7fffff;
3452        struct res_qp *qp;
3453
3454        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3455        if (err)
3456                return err;
3457        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3458        if (err)
3459                goto ex_abort;
3460
3461        atomic_dec(&qp->mtt->ref_count);
3462        atomic_dec(&qp->rcq->ref_count);
3463        atomic_dec(&qp->scq->ref_count);
3464        if (qp->srq)
3465                atomic_dec(&qp->srq->ref_count);
3466        res_end_move(dev, slave, RES_QP, qpn);
3467        return 0;
3468
3469ex_abort:
3470        res_abort_move(dev, slave, RES_QP, qpn);
3471
3472        return err;
3473}
3474
3475static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3476                                struct res_qp *rqp, u8 *gid)
3477{
3478        struct res_gid *res;
3479
3480        list_for_each_entry(res, &rqp->mcg_list, list) {
3481                if (!memcmp(res->gid, gid, 16))
3482                        return res;
3483        }
3484        return NULL;
3485}
3486
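/*
 * Record a multicast attachment on the QP's mcg_list so it can be torn
 * down when the slave goes away.  The list is protected by the
 * rqp->mcg_spl spinlock; a duplicate GID is rejected with -EEXIST, and
 * the reg_id saved here is what a later detach needs under device-managed
 * steering.
 */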
3487static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3488                       u8 *gid, enum mlx4_protocol prot,
3489                       enum mlx4_steer_type steer, u64 reg_id)
3490{
3491        struct res_gid *res;
3492        int err;
3493
        res = kzalloc(sizeof(*res), GFP_KERNEL);
3495        if (!res)
3496                return -ENOMEM;
3497
3498        spin_lock_irq(&rqp->mcg_spl);
3499        if (find_gid(dev, slave, rqp, gid)) {
3500                kfree(res);
3501                err = -EEXIST;
3502        } else {
3503                memcpy(res->gid, gid, 16);
3504                res->prot = prot;
3505                res->steer = steer;
3506                res->reg_id = reg_id;
3507                list_add_tail(&res->list, &rqp->mcg_list);
3508                err = 0;
3509        }
3510        spin_unlock_irq(&rqp->mcg_spl);
3511
3512        return err;
3513}
3514
3515static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3516                       u8 *gid, enum mlx4_protocol prot,
3517                       enum mlx4_steer_type steer, u64 *reg_id)
3518{
3519        struct res_gid *res;
3520        int err;
3521
3522        spin_lock_irq(&rqp->mcg_spl);
3523        res = find_gid(dev, slave, rqp, gid);
3524        if (!res || res->prot != prot || res->steer != steer)
3525                err = -EINVAL;
3526        else {
3527                *reg_id = res->reg_id;
3528                list_del(&res->list);
3529                kfree(res);
3530                err = 0;
3531        }
3532        spin_unlock_irq(&rqp->mcg_spl);
3533
3534        return err;
3535}
3536
3537static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3538                     int block_loopback, enum mlx4_protocol prot,
3539                     enum mlx4_steer_type type, u64 *reg_id)
3540{
3541        switch (dev->caps.steering_mode) {
3542        case MLX4_STEERING_MODE_DEVICE_MANAGED:
3543                return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3544                                                block_loopback, prot,
3545                                                reg_id);
3546        case MLX4_STEERING_MODE_B0:
3547                return mlx4_qp_attach_common(dev, qp, gid,
3548                                            block_loopback, prot, type);
3549        default:
3550                return -EINVAL;
3551        }
3552}
3553
3554static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3555                     enum mlx4_protocol prot, enum mlx4_steer_type type,
3556                     u64 reg_id)
3557{
3558        switch (dev->caps.steering_mode) {
3559        case MLX4_STEERING_MODE_DEVICE_MANAGED:
3560                return mlx4_flow_detach(dev, reg_id);
3561        case MLX4_STEERING_MODE_B0:
3562                return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3563        default:
3564                return -EINVAL;
3565        }
3566}
3567
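/*
 * QP_ATTACH/DETACH multiplexes several fields into the VHCR:
 *   in_modifier bits  0..23 - QP number
 *   in_modifier bits 28..30 - mlx4_protocol of the rule
 *   in_modifier bit  31     - block loopback flag
 *   op_modifier             - nonzero to attach, zero to detach
 * and bit 1 of byte 7 of the mailbox GID selects the steering type.  A
 * dummy struct mlx4_qp carrying only the QPN is enough for the helpers.
 */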
3568int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3569                               struct mlx4_vhcr *vhcr,
3570                               struct mlx4_cmd_mailbox *inbox,
3571                               struct mlx4_cmd_mailbox *outbox,
3572                               struct mlx4_cmd_info *cmd)
3573{
3574        struct mlx4_qp qp; /* dummy for calling attach/detach */
3575        u8 *gid = inbox->buf;
3576        enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3577        int err;
3578        int qpn;
3579        struct res_qp *rqp;
3580        u64 reg_id = 0;
3581        int attach = vhcr->op_modifier;
3582        int block_loopback = vhcr->in_modifier >> 31;
3583        u8 steer_type_mask = 2;
3584        enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3585
3586        qpn = vhcr->in_modifier & 0xffffff;
3587        err = get_res(dev, slave, qpn, RES_QP, &rqp);
3588        if (err)
3589                return err;
3590
3591        qp.qpn = qpn;
3592        if (attach) {
3593                err = qp_attach(dev, &qp, gid, block_loopback, prot,
3594                                type, &reg_id);
3595                if (err) {
                        pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3597                        goto ex_put;
3598                }
3599                err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3600                if (err)
3601                        goto ex_detach;
3602        } else {
3603                err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3604                if (err)
3605                        goto ex_put;
3606
3607                err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3608                if (err)
                        pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
                               qpn, reg_id);
3611        }
3612        put_res(dev, slave, qpn, RES_QP);
3613        return err;
3614
3615ex_detach:
3616        qp_detach(dev, &qp, gid, prot, type, reg_id);
3617ex_put:
3618        put_res(dev, slave, qpn, RES_QP);
3619        return err;
3620}
3621
3622/*
3623 * MAC validation for Flow Steering rules.
3624 * VF can attach rules only with a mac address which is assigned to it.
3625 */
3626static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3627                                   struct list_head *rlist)
3628{
3629        struct mac_res *res, *tmp;
3630        __be64 be_mac;
3631
        /* make sure it isn't a multicast or broadcast MAC */
3633        if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3634            !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3635                list_for_each_entry_safe(res, tmp, rlist, list) {
3636                        be_mac = cpu_to_be64(res->mac << 16);
3637                        if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3638                                return 0;
3639                }
                pr_err("MAC %pM doesn't belong to VF %d; steering rule rejected\n",
                       eth_header->eth.dst_mac, slave);
3642                return -EINVAL;
3643        }
3644        return 0;
3645}
3646
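/*
 * The tracker stores each slave MAC as a u64 with the six address bytes in
 * its low 48 bits.  cpu_to_be64(mac << 16) left-aligns those bytes, so the
 * first ETH_ALEN bytes of be_mac are the address in wire order and can be
 * memcmp()ed directly against dst_mac above; e.g. 00:02:c9:ab:cd:ef is
 * held as 0x0002c9abcdef and compares as the bytes 00 02 c9 ab cd ef.
 */
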
3647/*
3648 * In case of missing eth header, append eth header with a MAC address
3649 * assigned to the VF.
3650 */
3651static int add_eth_header(struct mlx4_dev *dev, int slave,
3652                          struct mlx4_cmd_mailbox *inbox,
3653                          struct list_head *rlist, int header_id)
3654{
3655        struct mac_res *res, *tmp;
3656        u8 port;
3657        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3658        struct mlx4_net_trans_rule_hw_eth *eth_header;
3659        struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3660        struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3661        __be64 be_mac = 0;
3662        __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3663
3664        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3665        port = ctrl->port;
3666        eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3667
        /* Move the existing headers deeper into the inbox to make room
         * for the eth header */
3669        switch (header_id) {
3670        case MLX4_NET_TRANS_RULE_ID_IPV4:
3671                ip_header =
3672                        (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3673                memmove(ip_header, eth_header,
3674                        sizeof(*ip_header) + sizeof(*l4_header));
3675                break;
3676        case MLX4_NET_TRANS_RULE_ID_TCP:
3677        case MLX4_NET_TRANS_RULE_ID_UDP:
3678                l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3679                            (eth_header + 1);
3680                memmove(l4_header, eth_header, sizeof(*l4_header));
3681                break;
3682        default:
3683                return -EINVAL;
3684        }
3685        list_for_each_entry_safe(res, tmp, rlist, list) {
3686                if (port == res->port) {
3687                        be_mac = cpu_to_be64(res->mac << 16);
3688                        break;
3689                }
3690        }
3691        if (!be_mac) {
                pr_err("Failed adding eth header to FS rule; can't find matching MAC for port %d\n",
                       port);
3694                return -EINVAL;
3695        }
3696
3697        memset(eth_header, 0, sizeof(*eth_header));
3698        eth_header->size = sizeof(*eth_header) >> 2;
3699        eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3700        memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3701        memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3702
3703        return 0;
3704
3705}
3706
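/*
 * Attach a device-managed flow steering rule on behalf of a slave.  The
 * rule's header chain is validated (an Ethernet header may be prepended,
 * growing in_modifier, which counts the rule size in dwords), the command
 * is executed natively, and the returned rule id is registered as a
 * RES_FS_RULE owned by the slave with the QPN as its extra information.
 * If tracking fails the rule is detached again so firmware and the
 * tracker stay in sync; the QP's ref_count mirrors the rules attached to it.
 */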
3707int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3708                                         struct mlx4_vhcr *vhcr,
3709                                         struct mlx4_cmd_mailbox *inbox,
3710                                         struct mlx4_cmd_mailbox *outbox,
3711                                         struct mlx4_cmd_info *cmd)
3712{
3714        struct mlx4_priv *priv = mlx4_priv(dev);
3715        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3716        struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3717        int err;
3718        int qpn;
3719        struct res_qp *rqp;
3720        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3721        struct _rule_hw  *rule_header;
3722        int header_id;
3723
3724        if (dev->caps.steering_mode !=
3725            MLX4_STEERING_MODE_DEVICE_MANAGED)
3726                return -EOPNOTSUPP;
3727
3728        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3729        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3730        err = get_res(dev, slave, qpn, RES_QP, &rqp);
3731        if (err) {
3732                pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3733                return err;
3734        }
3735        rule_header = (struct _rule_hw *)(ctrl + 1);
3736        header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3737
3738        switch (header_id) {
3739        case MLX4_NET_TRANS_RULE_ID_ETH:
3740                if (validate_eth_header_mac(slave, rule_header, rlist)) {
3741                        err = -EINVAL;
3742                        goto err_put;
3743                }
3744                break;
3745        case MLX4_NET_TRANS_RULE_ID_IB:
3746                break;
3747        case MLX4_NET_TRANS_RULE_ID_IPV4:
3748        case MLX4_NET_TRANS_RULE_ID_TCP:
3749        case MLX4_NET_TRANS_RULE_ID_UDP:
                pr_warn("Can't attach FS rule without L2 headers; adding L2 header\n");
3751                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3752                        err = -EINVAL;
3753                        goto err_put;
3754                }
3755                vhcr->in_modifier +=
3756                        sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3757                break;
3758        default:
3759                pr_err("Corrupted mailbox.\n");
3760                err = -EINVAL;
3761                goto err_put;
3762        }
3763
3764        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3765                           vhcr->in_modifier, 0,
3766                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3767                           MLX4_CMD_NATIVE);
3768        if (err)
3769                goto err_put;
3770
3771        err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3772        if (err) {
                mlx4_err(dev, "Failed to add flow steering resources\n");
3774                /* detach rule*/
3775                mlx4_cmd(dev, vhcr->out_param, 0, 0,
3776                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3777                         MLX4_CMD_NATIVE);
3778                goto err_put;
3779        }
3780        atomic_inc(&rqp->ref_count);
3781err_put:
3782        put_res(dev, slave, qpn, RES_QP);
3783        return err;
3784}
3785
3786int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3787                                         struct mlx4_vhcr *vhcr,
3788                                         struct mlx4_cmd_mailbox *inbox,
3789                                         struct mlx4_cmd_mailbox *outbox,
3790                                         struct mlx4_cmd_info *cmd)
3791{
3792        int err;
3793        struct res_qp *rqp;
3794        struct res_fs_rule *rrule;
3795
3796        if (dev->caps.steering_mode !=
3797            MLX4_STEERING_MODE_DEVICE_MANAGED)
3798                return -EOPNOTSUPP;
3799
3800        err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3801        if (err)
3802                return err;
        /* Release the rule from busy state before removal */
3804        put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3805        err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3806        if (err)
3807                return err;
3808
3809        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3810        if (err) {
                mlx4_err(dev, "Failed to remove flow steering resources\n");
3812                goto out;
3813        }
3814
3815        err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3816                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3817                       MLX4_CMD_NATIVE);
3818        if (!err)
3819                atomic_dec(&rqp->ref_count);
3820out:
3821        put_res(dev, slave, rrule->qpn, RES_QP);
3822        return err;
3823}
3824
3825enum {
3826        BUSY_MAX_RETRIES = 10
3827};
3828
3829int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3830                               struct mlx4_vhcr *vhcr,
3831                               struct mlx4_cmd_mailbox *inbox,
3832                               struct mlx4_cmd_mailbox *outbox,
3833                               struct mlx4_cmd_info *cmd)
3834{
3835        int err;
3836        int index = vhcr->in_modifier & 0xffff;
3837
3838        err = get_res(dev, slave, index, RES_COUNTER, NULL);
3839        if (err)
3840                return err;
3841
3842        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3843        put_res(dev, slave, index, RES_COUNTER);
3844        return err;
3845}
3846
3847static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3848{
3849        struct res_gid *rgid;
3850        struct res_gid *tmp;
3851        struct mlx4_qp qp; /* dummy for calling attach/detach */
3852
3853        list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3854                switch (dev->caps.steering_mode) {
3855                case MLX4_STEERING_MODE_DEVICE_MANAGED:
3856                        mlx4_flow_detach(dev, rgid->reg_id);
3857                        break;
3858                case MLX4_STEERING_MODE_B0:
3859                        qp.qpn = rqp->local_qpn;
3860                        (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3861                                                     rgid->prot, rgid->steer);
3862                        break;
3863                }
3864                list_del(&rgid->list);
3865                kfree(rgid);
3866        }
3867}
3868
3869static int _move_all_busy(struct mlx4_dev *dev, int slave,
3870                          enum mlx4_resource type, int print)
3871{
3872        struct mlx4_priv *priv = mlx4_priv(dev);
3873        struct mlx4_resource_tracker *tracker =
3874                &priv->mfunc.master.res_tracker;
3875        struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3876        struct res_common *r;
3877        struct res_common *tmp;
3878        int busy;
3879
3880        busy = 0;
3881        spin_lock_irq(mlx4_tlock(dev));
3882        list_for_each_entry_safe(r, tmp, rlist, list) {
3883                if (r->owner == slave) {
3884                        if (!r->removing) {
3885                                if (r->state == RES_ANY_BUSY) {
3886                                        if (print)
3887                                                mlx4_dbg(dev,
3888                                                         "%s id 0x%llx is busy\n",
3889                                                          ResourceType(type),
3890                                                          r->res_id);
3891                                        ++busy;
3892                                } else {
3893                                        r->from_state = r->state;
3894                                        r->state = RES_ANY_BUSY;
3895                                        r->removing = 1;
3896                                }
3897                        }
3898                }
3899        }
3900        spin_unlock_irq(mlx4_tlock(dev));
3901
3902        return busy;
3903}
3904
3905static int move_all_busy(struct mlx4_dev *dev, int slave,
3906                         enum mlx4_resource type)
3907{
3908        unsigned long begin;
3909        int busy;
3910
3911        begin = jiffies;
3912        do {
3913                busy = _move_all_busy(dev, slave, type, 0);
3914                if (time_after(jiffies, begin + 5 * HZ))
3915                        break;
3916                if (busy)
3917                        cond_resched();
3918        } while (busy);
3919
3920        if (busy)
3921                busy = _move_all_busy(dev, slave, type, 1);
3922
3923        return busy;
3924}
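
/*
 * Slave teardown.  move_all_busy() retries for up to five seconds, giving
 * in-flight commands a chance to drop their busy references, then reports
 * whatever is still busy.  Each rem_slave_*() walk below unwinds a
 * resource's state machine one step at a time; a QP in RES_QP_HW, for
 * instance, is first moved to reset via 2RST_QP, then has its ICM freed
 * (RES_QP_MAPPED), and finally its number range released and its tracker
 * entry destroyed (RES_QP_RESERVED).
 */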
3925static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3926{
3927        struct mlx4_priv *priv = mlx4_priv(dev);
3928        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3929        struct list_head *qp_list =
3930                &tracker->slave_list[slave].res_list[RES_QP];
3931        struct res_qp *qp;
3932        struct res_qp *tmp;
3933        int state;
3934        u64 in_param;
3935        int qpn;
3936        int err;
3937
3938        err = move_all_busy(dev, slave, RES_QP);
3939        if (err)
                mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
                          slave);
3942
3943        spin_lock_irq(mlx4_tlock(dev));
3944        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3945                spin_unlock_irq(mlx4_tlock(dev));
3946                if (qp->com.owner == slave) {
3947                        qpn = qp->com.res_id;
3948                        detach_qp(dev, slave, qp);
3949                        state = qp->com.from_state;
3950                        while (state != 0) {
3951                                switch (state) {
3952                                case RES_QP_RESERVED:
3953                                        spin_lock_irq(mlx4_tlock(dev));
3954                                        rb_erase(&qp->com.node,
3955                                                 &tracker->res_tree[RES_QP]);
3956                                        list_del(&qp->com.list);
3957                                        spin_unlock_irq(mlx4_tlock(dev));
3958                                        if (!valid_reserved(dev, slave, qpn)) {
3959                                                __mlx4_qp_release_range(dev, qpn, 1);
3960                                                mlx4_release_resource(dev, slave,
3961                                                                      RES_QP, 1, 0);
3962                                        }
3963                                        kfree(qp);
3964                                        state = 0;
3965                                        break;
3966                                case RES_QP_MAPPED:
3967                                        if (!valid_reserved(dev, slave, qpn))
3968                                                __mlx4_qp_free_icm(dev, qpn);
3969                                        state = RES_QP_RESERVED;
3970                                        break;
3971                                case RES_QP_HW:
3972                                        in_param = slave;
3973                                        err = mlx4_cmd(dev, in_param,
3974                                                       qp->local_qpn, 2,
3975                                                       MLX4_CMD_2RST_QP,
3976                                                       MLX4_CMD_TIME_CLASS_A,
3977                                                       MLX4_CMD_NATIVE);
3978                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
                                                         slave, qp->local_qpn);
3983                                        atomic_dec(&qp->rcq->ref_count);
3984                                        atomic_dec(&qp->scq->ref_count);
3985                                        atomic_dec(&qp->mtt->ref_count);
3986                                        if (qp->srq)
3987                                                atomic_dec(&qp->srq->ref_count);
3988                                        state = RES_QP_MAPPED;
3989                                        break;
3990                                default:
3991                                        state = 0;
3992                                }
3993                        }
3994                }
3995                spin_lock_irq(mlx4_tlock(dev));
3996        }
3997        spin_unlock_irq(mlx4_tlock(dev));
3998}
3999
4000static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4001{
4002        struct mlx4_priv *priv = mlx4_priv(dev);
4003        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4004        struct list_head *srq_list =
4005                &tracker->slave_list[slave].res_list[RES_SRQ];
4006        struct res_srq *srq;
4007        struct res_srq *tmp;
4008        int state;
4009        u64 in_param;
4011        int srqn;
4012        int err;
4013
4014        err = move_all_busy(dev, slave, RES_SRQ);
4015        if (err)
                mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
                          slave);
4018
4019        spin_lock_irq(mlx4_tlock(dev));
4020        list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4021                spin_unlock_irq(mlx4_tlock(dev));
4022                if (srq->com.owner == slave) {
4023                        srqn = srq->com.res_id;
4024                        state = srq->com.from_state;
4025                        while (state != 0) {
4026                                switch (state) {
4027                                case RES_SRQ_ALLOCATED:
4028                                        __mlx4_srq_free_icm(dev, srqn);
4029                                        spin_lock_irq(mlx4_tlock(dev));
4030                                        rb_erase(&srq->com.node,
4031                                                 &tracker->res_tree[RES_SRQ]);
4032                                        list_del(&srq->com.list);
4033                                        spin_unlock_irq(mlx4_tlock(dev));
4034                                        mlx4_release_resource(dev, slave,
4035                                                              RES_SRQ, 1, 0);
4036                                        kfree(srq);
4037                                        state = 0;
4038                                        break;
4039
4040                                case RES_SRQ_HW:
4041                                        in_param = slave;
4042                                        err = mlx4_cmd(dev, in_param, srqn, 1,
4043                                                       MLX4_CMD_HW2SW_SRQ,
4044                                                       MLX4_CMD_TIME_CLASS_A,
4045                                                       MLX4_CMD_NATIVE);
4046                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
                                                         slave, srqn);
4051
4052                                        atomic_dec(&srq->mtt->ref_count);
4053                                        if (srq->cq)
4054                                                atomic_dec(&srq->cq->ref_count);
4055                                        state = RES_SRQ_ALLOCATED;
4056                                        break;
4057
4058                                default:
4059                                        state = 0;
4060                                }
4061                        }
4062                }
4063                spin_lock_irq(mlx4_tlock(dev));
4064        }
4065        spin_unlock_irq(mlx4_tlock(dev));
4066}
4067
4068static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4069{
4070        struct mlx4_priv *priv = mlx4_priv(dev);
4071        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4072        struct list_head *cq_list =
4073                &tracker->slave_list[slave].res_list[RES_CQ];
4074        struct res_cq *cq;
4075        struct res_cq *tmp;
4076        int state;
4077        u64 in_param;
4079        int cqn;
4080        int err;
4081
4082        err = move_all_busy(dev, slave, RES_CQ);
4083        if (err)
                mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
                          slave);
4086
4087        spin_lock_irq(mlx4_tlock(dev));
4088        list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4089                spin_unlock_irq(mlx4_tlock(dev));
4090                if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4091                        cqn = cq->com.res_id;
4092                        state = cq->com.from_state;
4093                        while (state != 0) {
4094                                switch (state) {
4095                                case RES_CQ_ALLOCATED:
4096                                        __mlx4_cq_free_icm(dev, cqn);
4097                                        spin_lock_irq(mlx4_tlock(dev));
4098                                        rb_erase(&cq->com.node,
4099                                                 &tracker->res_tree[RES_CQ]);
4100                                        list_del(&cq->com.list);
4101                                        spin_unlock_irq(mlx4_tlock(dev));
4102                                        mlx4_release_resource(dev, slave,
4103                                                              RES_CQ, 1, 0);
4104                                        kfree(cq);
4105                                        state = 0;
4106                                        break;
4107
4108                                case RES_CQ_HW:
4109                                        in_param = slave;
4110                                        err = mlx4_cmd(dev, in_param, cqn, 1,
4111                                                       MLX4_CMD_HW2SW_CQ,
4112                                                       MLX4_CMD_TIME_CLASS_A,
4113                                                       MLX4_CMD_NATIVE);
4114                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
                                                         slave, cqn);
4119                                        atomic_dec(&cq->mtt->ref_count);
4120                                        state = RES_CQ_ALLOCATED;
4121                                        break;
4122
4123                                default:
4124                                        state = 0;
4125                                }
4126                        }
4127                }
4128                spin_lock_irq(mlx4_tlock(dev));
4129        }
4130        spin_unlock_irq(mlx4_tlock(dev));
4131}
4132
4133static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4134{
4135        struct mlx4_priv *priv = mlx4_priv(dev);
4136        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4137        struct list_head *mpt_list =
4138                &tracker->slave_list[slave].res_list[RES_MPT];
4139        struct res_mpt *mpt;
4140        struct res_mpt *tmp;
4141        int state;
4142        u64 in_param;
4144        int mptn;
4145        int err;
4146
4147        err = move_all_busy(dev, slave, RES_MPT);
4148        if (err)
                mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
                          slave);
4151
4152        spin_lock_irq(mlx4_tlock(dev));
4153        list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4154                spin_unlock_irq(mlx4_tlock(dev));
4155                if (mpt->com.owner == slave) {
4156                        mptn = mpt->com.res_id;
4157                        state = mpt->com.from_state;
4158                        while (state != 0) {
4159                                switch (state) {
4160                                case RES_MPT_RESERVED:
4161                                        __mlx4_mpt_release(dev, mpt->key);
4162                                        spin_lock_irq(mlx4_tlock(dev));
4163                                        rb_erase(&mpt->com.node,
4164                                                 &tracker->res_tree[RES_MPT]);
4165                                        list_del(&mpt->com.list);
4166                                        spin_unlock_irq(mlx4_tlock(dev));
4167                                        mlx4_release_resource(dev, slave,
4168                                                              RES_MPT, 1, 0);
4169                                        kfree(mpt);
4170                                        state = 0;
4171                                        break;
4172
4173                                case RES_MPT_MAPPED:
4174                                        __mlx4_mpt_free_icm(dev, mpt->key);
4175                                        state = RES_MPT_RESERVED;
4176                                        break;
4177
4178                                case RES_MPT_HW:
4179                                        in_param = slave;
4180                                        err = mlx4_cmd(dev, in_param, mptn, 0,
4181                                                     MLX4_CMD_HW2SW_MPT,
4182                                                     MLX4_CMD_TIME_CLASS_A,
4183                                                     MLX4_CMD_NATIVE);
4184                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
                                                         slave, mptn);
4189                                        if (mpt->mtt)
4190                                                atomic_dec(&mpt->mtt->ref_count);
4191                                        state = RES_MPT_MAPPED;
4192                                        break;
4193                                default:
4194                                        state = 0;
4195                                }
4196                        }
4197                }
4198                spin_lock_irq(mlx4_tlock(dev));
4199        }
4200        spin_unlock_irq(mlx4_tlock(dev));
4201}
4202
4203static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4204{
4205        struct mlx4_priv *priv = mlx4_priv(dev);
4206        struct mlx4_resource_tracker *tracker =
4207                &priv->mfunc.master.res_tracker;
4208        struct list_head *mtt_list =
4209                &tracker->slave_list[slave].res_list[RES_MTT];
4210        struct res_mtt *mtt;
4211        struct res_mtt *tmp;
4212        int state;
4214        int base;
4215        int err;
4216
4217        err = move_all_busy(dev, slave, RES_MTT);
4218        if (err)
                mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
                          slave);
4221
4222        spin_lock_irq(mlx4_tlock(dev));
4223        list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4224                spin_unlock_irq(mlx4_tlock(dev));
4225                if (mtt->com.owner == slave) {
4226                        base = mtt->com.res_id;
4227                        state = mtt->com.from_state;
4228                        while (state != 0) {
4229                                switch (state) {
4230                                case RES_MTT_ALLOCATED:
4231                                        __mlx4_free_mtt_range(dev, base,
4232                                                              mtt->order);
4233                                        spin_lock_irq(mlx4_tlock(dev));
4234                                        rb_erase(&mtt->com.node,
4235                                                 &tracker->res_tree[RES_MTT]);
4236                                        list_del(&mtt->com.list);
4237                                        spin_unlock_irq(mlx4_tlock(dev));
4238                                        mlx4_release_resource(dev, slave, RES_MTT,
4239                                                              1 << mtt->order, 0);
4240                                        kfree(mtt);
4241                                        state = 0;
4242                                        break;
4243
4244                                default:
4245                                        state = 0;
4246                                }
4247                        }
4248                }
4249                spin_lock_irq(mlx4_tlock(dev));
4250        }
4251        spin_unlock_irq(mlx4_tlock(dev));
4252}
4253
4254static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4255{
4256        struct mlx4_priv *priv = mlx4_priv(dev);
4257        struct mlx4_resource_tracker *tracker =
4258                &priv->mfunc.master.res_tracker;
4259        struct list_head *fs_rule_list =
4260                &tracker->slave_list[slave].res_list[RES_FS_RULE];
4261        struct res_fs_rule *fs_rule;
4262        struct res_fs_rule *tmp;
4263        int state;
4264        u64 base;
4265        int err;
4266
4267        err = move_all_busy(dev, slave, RES_FS_RULE);
4268        if (err)
                mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
                          slave);
4271
4272        spin_lock_irq(mlx4_tlock(dev));
4273        list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4274                spin_unlock_irq(mlx4_tlock(dev));
4275                if (fs_rule->com.owner == slave) {
4276                        base = fs_rule->com.res_id;
4277                        state = fs_rule->com.from_state;
4278                        while (state != 0) {
4279                                switch (state) {
4280                                case RES_FS_RULE_ALLOCATED:
4281                                        /* detach rule */
4282                                        err = mlx4_cmd(dev, base, 0, 0,
4283                                                       MLX4_QP_FLOW_STEERING_DETACH,
4284                                                       MLX4_CMD_TIME_CLASS_A,
4285                                                       MLX4_CMD_NATIVE);
4286
4287                                        spin_lock_irq(mlx4_tlock(dev));
4288                                        rb_erase(&fs_rule->com.node,
4289                                                 &tracker->res_tree[RES_FS_RULE]);
4290                                        list_del(&fs_rule->com.list);
4291                                        spin_unlock_irq(mlx4_tlock(dev));
4292                                        kfree(fs_rule);
4293                                        state = 0;
4294                                        break;
4295
4296                                default:
4297                                        state = 0;
4298                                }
4299                        }
4300                }
4301                spin_lock_irq(mlx4_tlock(dev));
4302        }
4303        spin_unlock_irq(mlx4_tlock(dev));
4304}
4305
4306static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4307{
4308        struct mlx4_priv *priv = mlx4_priv(dev);
4309        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4310        struct list_head *eq_list =
4311                &tracker->slave_list[slave].res_list[RES_EQ];
4312        struct res_eq *eq;
4313        struct res_eq *tmp;
4314        int err;
4315        int state;
4317        int eqn;
4318        struct mlx4_cmd_mailbox *mailbox;
4319
4320        err = move_all_busy(dev, slave, RES_EQ);
4321        if (err)
                mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
                          slave);
4324
4325        spin_lock_irq(mlx4_tlock(dev));
4326        list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4327                spin_unlock_irq(mlx4_tlock(dev));
4328                if (eq->com.owner == slave) {
4329                        eqn = eq->com.res_id;
4330                        state = eq->com.from_state;
4331                        while (state != 0) {
4332                                switch (state) {
4333                                case RES_EQ_RESERVED:
4334                                        spin_lock_irq(mlx4_tlock(dev));
4335                                        rb_erase(&eq->com.node,
4336                                                 &tracker->res_tree[RES_EQ]);
4337                                        list_del(&eq->com.list);
4338                                        spin_unlock_irq(mlx4_tlock(dev));
4339                                        kfree(eq);
4340                                        state = 0;
4341                                        break;
4342
4343                                case RES_EQ_HW:
4344                                        mailbox = mlx4_alloc_cmd_mailbox(dev);
4345                                        if (IS_ERR(mailbox)) {
4346                                                cond_resched();
4347                                                continue;
4348                                        }
4349                                        err = mlx4_cmd_box(dev, slave, 0,
4350                                                           eqn & 0xff, 0,
4351                                                           MLX4_CMD_HW2SW_EQ,
4352                                                           MLX4_CMD_TIME_CLASS_A,
4353                                                           MLX4_CMD_NATIVE);
4354                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
                                                         slave, eqn);
4358                                        mlx4_free_cmd_mailbox(dev, mailbox);
4359                                        atomic_dec(&eq->mtt->ref_count);
4360                                        state = RES_EQ_RESERVED;
4361                                        break;
4362
4363                                default:
4364                                        state = 0;
4365                                }
4366                        }
4367                }
4368                spin_lock_irq(mlx4_tlock(dev));
4369        }
4370        spin_unlock_irq(mlx4_tlock(dev));
4371}
4372
4373static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4374{
4375        struct mlx4_priv *priv = mlx4_priv(dev);
4376        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4377        struct list_head *counter_list =
4378                &tracker->slave_list[slave].res_list[RES_COUNTER];
4379        struct res_counter *counter;
4380        struct res_counter *tmp;
4381        int err;
4382        int index;
4383
4384        err = move_all_busy(dev, slave, RES_COUNTER);
4385        if (err)
                mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
                          slave);
4388
4389        spin_lock_irq(mlx4_tlock(dev));
4390        list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4391                if (counter->com.owner == slave) {
4392                        index = counter->com.res_id;
4393                        rb_erase(&counter->com.node,
4394                                 &tracker->res_tree[RES_COUNTER]);
4395                        list_del(&counter->com.list);
4396                        kfree(counter);
4397                        __mlx4_counter_free(dev, index);
4398                        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4399                }
4400        }
4401        spin_unlock_irq(mlx4_tlock(dev));
4402}
4403
4404static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4405{
4406        struct mlx4_priv *priv = mlx4_priv(dev);
4407        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4408        struct list_head *xrcdn_list =
4409                &tracker->slave_list[slave].res_list[RES_XRCD];
4410        struct res_xrcdn *xrcd;
4411        struct res_xrcdn *tmp;
4412        int err;
4413        int xrcdn;
4414
4415        err = move_all_busy(dev, slave, RES_XRCD);
4416        if (err)
                mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
                          slave);
4419
4420        spin_lock_irq(mlx4_tlock(dev));
4421        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4422                if (xrcd->com.owner == slave) {
4423                        xrcdn = xrcd->com.res_id;
4424                        rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4425                        list_del(&xrcd->com.list);
4426                        kfree(xrcd);
4427                        __mlx4_xrcd_free(dev, xrcdn);
4428                }
4429        }
4430        spin_unlock_irq(mlx4_tlock(dev));
4431}
4432
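/*
 * Reclaim everything a slave owns.  The order matters: flow rules and QPs
 * go first because they hold references on CQs, SRQs and MTTs
 * (rem_slave_cqs deliberately skips a CQ whose ref_count is still
 * nonzero), while MTTs, counters and XRC domains go last, once nothing
 * references them anymore.
 */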
4433void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4434{
4435        struct mlx4_priv *priv = mlx4_priv(dev);
4436
4437        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4438        rem_slave_vlans(dev, slave);
4439        rem_slave_macs(dev, slave);
4440        rem_slave_fs_rule(dev, slave);
4441        rem_slave_qps(dev, slave);
4442        rem_slave_srqs(dev, slave);
4443        rem_slave_cqs(dev, slave);
4444        rem_slave_mrs(dev, slave);
4445        rem_slave_eqs(dev, slave);
4446        rem_slave_mtts(dev, slave);
4447        rem_slave_counters(dev, slave);
4448        rem_slave_xrcdns(dev, slave);
4449        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4450}
4451
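    /* Deferred work that pushes an administrative VLAN change for a VF
     * to all of its QPs via the UPDATE_QP firmware command. Must run on
     * the master, which owns the resource tracker.
     */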
4452void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4453{
4454        struct mlx4_vf_immed_vlan_work *work =
4455                container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4456        struct mlx4_cmd_mailbox *mailbox;
4457        struct mlx4_update_qp_context *upd_context;
4458        struct mlx4_dev *dev = &work->priv->dev;
4459        struct mlx4_resource_tracker *tracker =
4460                &work->priv->mfunc.master.res_tracker;
4461        struct list_head *qp_list =
4462                &tracker->slave_list[work->slave].res_list[RES_QP];
4463        struct res_qp *qp;
4464        struct res_qp *tmp;
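            /* Masks of the primary-path fields that UPDATE_QP is allowed
             * to modify; the vlan_ctrl bits are applied only to non-RC
             * QPs (see below).
             */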
4465        u64 qp_path_mask_vlan_ctrl =
4466                       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4467                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4468                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4469                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4470                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4471                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4472
4473        u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4474                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4475                       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4476                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4477                       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4478                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4479                       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4480
4481        int err;
4482        int port, errors = 0;
4483        u8 vlan_control;
4484
4485        if (mlx4_is_slave(dev)) {
4486                mlx4_warn(dev, "Trying to update-qp from a slave device, for slave %d\n",
4487                          work->slave);
4488                goto out;
4489        }
4490
4491        mailbox = mlx4_alloc_cmd_mailbox(dev);
4492        if (IS_ERR(mailbox))
4493                goto out;
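            /* Pick the VLAN blocking policy: block everything when the
             * link is administratively disabled, block tagged traffic
             * when no VLAN is enforced (vlan_id 0), and otherwise block
             * whatever would bypass the enforced VLAN.
             */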
4494        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4495                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4496                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4497                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4498                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4499                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4500                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4501        else if (!work->vlan_id)
4502                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4503                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4504        else
4505                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4506                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4507                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4508
4509        upd_context = mailbox->buf;
4510        upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
4511
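            /* Walk the slave's QP list; the tracker lock is dropped
             * around the body of each iteration because mlx4_cmd() may
             * sleep.
             */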
4512        spin_lock_irq(mlx4_tlock(dev));
4513        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4514                spin_unlock_irq(mlx4_tlock(dev));
4515                if (qp->com.owner == work->slave) {
4516                        if (qp->com.from_state != RES_QP_HW ||
4517                            !qp->sched_queue ||  /* no INIT2RTR trans yet */
4518                            mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4519                            qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4520                                spin_lock_irq(mlx4_tlock(dev));
4521                                continue;
4522                        }
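                            /* bit 6 of sched_queue encodes the (0-based) port */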
4523                        port = ((qp->sched_queue >> 6) & 1) + 1;
4524                        if (port != work->port) {
4525                                spin_lock_irq(mlx4_tlock(dev));
4526                                continue;
4527                        }
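                            /* RC QPs keep their current VLAN blocking
                             * policy; the vlan_ctrl bits are updated only
                             * for non-RC QPs.
                             */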
4528                        if (((qp->qpc_flags >> 16) & 0xff) == MLX4_QP_ST_RC)
4529                                upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4530                        else
4531                                upd_context->primary_addr_path_mask =
4532                                        cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
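                            /* MLX4_VGT restores the QP's own (guest
                             * controlled) VLAN settings saved in the
                             * tracker; any other vlan_id enforces the
                             * administrative VLAN.
                             */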
4533                        if (work->vlan_id == MLX4_VGT) {
4534                                upd_context->qp_context.param3 = qp->param3;
4535                                upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4536                                upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4537                                upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4538                                upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4539                                upd_context->qp_context.pri_path.feup = qp->feup;
4540                                upd_context->qp_context.pri_path.sched_queue =
4541                                        qp->sched_queue;
4542                        } else {
4543                                upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4544                                upd_context->qp_context.pri_path.vlan_control = vlan_control;
4545                                upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4546                                upd_context->qp_context.pri_path.fvl_rx =
4547                                        qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4548                                upd_context->qp_context.pri_path.fl =
4549                                        qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4550                                upd_context->qp_context.pri_path.feup =
4551                                        qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
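                                    /* bits 3..5 of sched_queue carry the
                                     * priority; replace with the admin QoS
                                     */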
4552                                upd_context->qp_context.pri_path.sched_queue =
4553                                        qp->sched_queue & 0xC7;
4554                                upd_context->qp_context.pri_path.sched_queue |=
4555                                        ((work->qos & 0x7) << 3);
4556                        }
4557
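                            /* Apply the new context in firmware; count
                             * failures but keep walking the list.
                             */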
4558                        err = mlx4_cmd(dev, mailbox->dma,
4559                                       qp->local_qpn & 0xffffff,
4560                                       0, MLX4_CMD_UPDATE_QP,
4561                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4562                        if (err) {
4563                                mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4564                                          work->slave, port,
4565                                          qp->local_qpn,
4566                                          err);
4567                                errors++;
4568                        }
4569                }
4570                spin_lock_irq(mlx4_tlock(dev));
4571        }
4572        spin_unlock_irq(mlx4_tlock(dev));
4573        mlx4_free_cmd_mailbox(dev, mailbox);
4574
4575        if (errors)
4576                mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4577                         errors, work->slave, work->port);
4578
4579        /* Unregister the previous vlan_id, if one was set, but only
4580         * when all the QP updates succeeded.
4581         */
4582        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4583            work->orig_vlan_ix != NO_INDX)
4584                __mlx4_unregister_vlan(&work->priv->dev, work->port,
4585                                       work->orig_vlan_id);
4586out:
4587        kfree(work);
4588        return;
4589}
4590