linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

static int mlx4_is_eth(struct mlx4_dev *dev, int port)
{
        return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
}

static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}
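
/*
 * Both rbtree helpers above leave locking to the caller; e.g.
 * add_res_range() below does the lookup-then-insert pair under the
 * tracker lock:
 *
 *      spin_lock_irq(mlx4_tlock(dev));
 *      if (!find_res(dev, res_id, type))
 *              err = res_tracker_insert(root, res);
 *      spin_unlock_irq(mlx4_tlock(dev));
 */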

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug use */
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}
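
/*
 * Worked example: with quota = 10, guaranteed = 4, allocated = 3 and
 * count = 3, one unit is still covered by this function's guarantee,
 * so from_free = 2 and from_rsvd = 1; the request is granted only if
 * the shared pool can give up two units without dropping below the
 * reserved watermark (free - 2 >= reserved).
 */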

static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                    enum mlx4_resource res_type, int count,
                                    int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
}

static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}
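
/*
 * Worked example: with dev->num_vfs = 7 and num_instances = 4096, each
 * function gets guaranteed = 4096 / (2 * 8) = 256 and quota = 4096 / 2 +
 * 256 = 2304.  The guaranteed shares of all eight functions together
 * claim half the pool; the other half is the shared free area that
 * mlx4_grant_resource() draws from.
 */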

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->num_vfs + 1) * sizeof(int),
                                                       GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port
                                         * for both ports.
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] = 0;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->res_free = res_alloc->quota[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        /* The QP context starts at offset 8 in the mailbox; offset 64 is
         * pri_path.sched_queue and offset 35 is pri_path.pkey_index.
         */
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        /* remap the guest's virtual pkey index to the physical one */
        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}
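
/*
 * Bit 6 of sched_queue selects the physical port, so
 * "(sched_queue >> 6 & 1) + 1" above (and "& 0x40" below) both decode
 * the 1-based port number from the QP context.
 */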

static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        int port;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd */
                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
        return 0;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}
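
/*
 * num_mpts is a power of two, so this mask converts an MPT key into its
 * index in the MPT table (key & mpt_mask(dev)).
 */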

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;

        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof(*res_arr), GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* i indexes res_arr here, so unwind to 0 rather than base; entries
         * [0, i) were both inserted in the tree and linked on the slave's
         * list, so undo both before freeing everything.
         */
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}
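
/*
 * add_res_range()/rem_res_range() are the tracker's bookkeeping pair.
 * Allocation paths register a freshly reserved range and roll the HW
 * allocation back if registration fails, as qp_alloc_res() does below:
 *
 *      err = add_res_range(dev, slave, base, count, RES_QP, 0);
 *      if (err) {
 *              mlx4_release_resource(dev, slave, RES_QP, count, 0);
 *              __mlx4_qp_release_range(dev, base, count);
 *      }
 */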

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
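
/*
 * State machine enforced above: every legal move first parks the QP in
 * the transient RES_QP_BUSY state, which res_end_move() commits and
 * res_abort_move() rolls back:
 *
 *      RES_QP_RESERVED <--> RES_QP_MAPPED <--> RES_QP_HW
 *
 * The MPT, EQ, CQ and SRQ movers below follow the same pattern with
 * their own state sets.
 */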
1307
1308static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1309                                enum res_mpt_states state, struct res_mpt **mpt)
1310{
1311        struct mlx4_priv *priv = mlx4_priv(dev);
1312        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1313        struct res_mpt *r;
1314        int err = 0;
1315
1316        spin_lock_irq(mlx4_tlock(dev));
1317        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1318        if (!r)
1319                err = -ENOENT;
1320        else if (r->com.owner != slave)
1321                err = -EPERM;
1322        else {
1323                switch (state) {
1324                case RES_MPT_BUSY:
1325                        err = -EINVAL;
1326                        break;
1327
1328                case RES_MPT_RESERVED:
1329                        if (r->com.state != RES_MPT_MAPPED)
1330                                err = -EINVAL;
1331                        break;
1332
1333                case RES_MPT_MAPPED:
1334                        if (r->com.state != RES_MPT_RESERVED &&
1335                            r->com.state != RES_MPT_HW)
1336                                err = -EINVAL;
1337                        break;
1338
1339                case RES_MPT_HW:
1340                        if (r->com.state != RES_MPT_MAPPED)
1341                                err = -EINVAL;
1342                        break;
1343                default:
1344                        err = -EINVAL;
1345                }
1346
1347                if (!err) {
1348                        r->com.from_state = r->com.state;
1349                        r->com.to_state = state;
1350                        r->com.state = RES_MPT_BUSY;
1351                        if (mpt)
1352                                *mpt = r;
1353                }
1354        }
1355
1356        spin_unlock_irq(mlx4_tlock(dev));
1357
1358        return err;
1359}
1360
1361static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1362                                enum res_eq_states state, struct res_eq **eq)
1363{
1364        struct mlx4_priv *priv = mlx4_priv(dev);
1365        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1366        struct res_eq *r;
1367        int err = 0;
1368
1369        spin_lock_irq(mlx4_tlock(dev));
1370        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1371        if (!r)
1372                err = -ENOENT;
1373        else if (r->com.owner != slave)
1374                err = -EPERM;
1375        else {
1376                switch (state) {
1377                case RES_EQ_BUSY:
1378                        err = -EINVAL;
1379                        break;
1380
1381                case RES_EQ_RESERVED:
1382                        if (r->com.state != RES_EQ_HW)
1383                                err = -EINVAL;
1384                        break;
1385
1386                case RES_EQ_HW:
1387                        if (r->com.state != RES_EQ_RESERVED)
1388                                err = -EINVAL;
1389                        break;
1390
1391                default:
1392                        err = -EINVAL;
1393                }
1394
1395                if (!err) {
1396                        r->com.from_state = r->com.state;
1397                        r->com.to_state = state;
1398                        r->com.state = RES_EQ_BUSY;
1399                        if (eq)
1400                                *eq = r;
1401                }
1402        }
1403
1404        spin_unlock_irq(mlx4_tlock(dev));
1405
1406        return err;
1407}
1408
1409static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1410                                enum res_cq_states state, struct res_cq **cq)
1411{
1412        struct mlx4_priv *priv = mlx4_priv(dev);
1413        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1414        struct res_cq *r;
1415        int err;
1416
1417        spin_lock_irq(mlx4_tlock(dev));
1418        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1419        if (!r) {
1420                err = -ENOENT;
1421        } else if (r->com.owner != slave) {
1422                err = -EPERM;
1423        } else if (state == RES_CQ_ALLOCATED) {
1424                if (r->com.state != RES_CQ_HW)
1425                        err = -EINVAL;
1426                else if (atomic_read(&r->ref_count))
1427                        err = -EBUSY;
1428                else
1429                        err = 0;
1430        } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1431                err = -EINVAL;
1432        } else {
1433                err = 0;
1434        }
1435
1436        if (!err) {
1437                r->com.from_state = r->com.state;
1438                r->com.to_state = state;
1439                r->com.state = RES_CQ_BUSY;
1440                if (cq)
1441                        *cq = r;
1442        }
1443
1444        spin_unlock_irq(mlx4_tlock(dev));
1445
1446        return err;
1447}
1448
1449static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1450                                 enum res_srq_states state, struct res_srq **srq)
1451{
1452        struct mlx4_priv *priv = mlx4_priv(dev);
1453        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1454        struct res_srq *r;
1455        int err = 0;
1456
1457        spin_lock_irq(mlx4_tlock(dev));
1458        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1459        if (!r) {
1460                err = -ENOENT;
1461        } else if (r->com.owner != slave) {
1462                err = -EPERM;
1463        } else if (state == RES_SRQ_ALLOCATED) {
1464                if (r->com.state != RES_SRQ_HW)
1465                        err = -EINVAL;
1466                else if (atomic_read(&r->ref_count))
1467                        err = -EBUSY;
1468        } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1469                err = -EINVAL;
1470        }
1471
1472        if (!err) {
1473                r->com.from_state = r->com.state;
1474                r->com.to_state = state;
1475                r->com.state = RES_SRQ_BUSY;
1476                if (srq)
1477                        *srq = r;
1478        }
1479
1480        spin_unlock_irq(mlx4_tlock(dev));
1481
1482        return err;
1483}
1484
1485static void res_abort_move(struct mlx4_dev *dev, int slave,
1486                           enum mlx4_resource type, int id)
1487{
1488        struct mlx4_priv *priv = mlx4_priv(dev);
1489        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1490        struct res_common *r;
1491
1492        spin_lock_irq(mlx4_tlock(dev));
1493        r = res_tracker_lookup(&tracker->res_tree[type], id);
1494        if (r && (r->owner == slave))
1495                r->state = r->from_state;
1496        spin_unlock_irq(mlx4_tlock(dev));
1497}
1498
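    /* Commit a transition started by one of the *_res_start_move_to()
     * helpers: move the resource from its transient busy state to the
     * requested target state.
     */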
1499static void res_end_move(struct mlx4_dev *dev, int slave,
1500                         enum mlx4_resource type, int id)
1501{
1502        struct mlx4_priv *priv = mlx4_priv(dev);
1503        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1504        struct res_common *r;
1505
1506        spin_lock_irq(mlx4_tlock(dev));
1507        r = res_tracker_lookup(&tracker->res_tree[type], id);
1508        if (r && (r->owner == slave))
1509                r->state = r->to_state;
1510        spin_unlock_irq(mlx4_tlock(dev));
1511}
1512
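    /* A reserved qpn may be used here only if it lies in the driver's
     * reserved QP range and either we are the master or the qpn belongs
     * to this slave's proxy QP range.
     */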
1513static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1514{
1515        return mlx4_is_qp_reserved(dev, qpn) &&
1516                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1517}
1518
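    /* QPNs below this boundary belong to the FW reserved region; their
     * ICM is owned by FW and must not be allocated or freed here.
     */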
1519static int fw_reserved(struct mlx4_dev *dev, int qpn)
1520{
1521        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1522}
1523
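    /* QP allocation is a two step process: RES_OP_RESERVE charges the
     * slave's quota and reserves a qpn range, RES_OP_MAP_ICM then maps
     * ICM for a single qpn and moves it to RES_QP_MAPPED.
     */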
1524static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1525                        u64 in_param, u64 *out_param)
1526{
1527        int err;
1528        int count;
1529        int align;
1530        int base;
1531        int qpn;
1532
1533        switch (op) {
1534        case RES_OP_RESERVE:
1535                count = get_param_l(&in_param);
1536                align = get_param_h(&in_param);
1537                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1538                if (err)
1539                        return err;
1540
1541                err = __mlx4_qp_reserve_range(dev, count, align, &base);
1542                if (err) {
1543                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
1544                        return err;
1545                }
1546
1547                err = add_res_range(dev, slave, base, count, RES_QP, 0);
1548                if (err) {
1549                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
1550                        __mlx4_qp_release_range(dev, base, count);
1551                        return err;
1552                }
1553                set_param_l(out_param, base);
1554                break;
1555        case RES_OP_MAP_ICM:
1556                qpn = get_param_l(&in_param) & 0x7fffff;
1557                if (valid_reserved(dev, slave, qpn)) {
1558                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1559                        if (err)
1560                                return err;
1561                }
1562
1563                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1564                                           NULL, 1);
1565                if (err)
1566                        return err;
1567
1568                if (!fw_reserved(dev, qpn)) {
1569                        err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1570                        if (err) {
1571                                res_abort_move(dev, slave, RES_QP, qpn);
1572                                return err;
1573                        }
1574                }
1575
1576                res_end_move(dev, slave, RES_QP, qpn);
1577                break;
1578
1579        default:
1580                err = -EINVAL;
1581                break;
1582        }
1583        return err;
1584}
1585
1586static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1587                         u64 in_param, u64 *out_param)
1588{
1589        int err = -EINVAL;
1590        int base;
1591        int order;
1592
1593        if (op != RES_OP_RESERVE_AND_MAP)
1594                return err;
1595
1596        order = get_param_l(&in_param);
1597
1598        err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1599        if (err)
1600                return err;
1601
1602        base = __mlx4_alloc_mtt_range(dev, order);
1603        if (base == -1) {
1604                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1605                return -ENOMEM;
1606        }
1607
1608        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1609        if (err) {
1610                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1611                __mlx4_free_mtt_range(dev, base, order);
1612        } else {
1613                set_param_l(out_param, base);
1614        }
1615
1616        return err;
1617}
1618
1619static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1620                         u64 in_param, u64 *out_param)
1621{
1622        int err = -EINVAL;
1623        int index;
1624        int id;
1625        struct res_mpt *mpt;
1626
1627        switch (op) {
1628        case RES_OP_RESERVE:
1629                err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1630                if (err)
1631                        break;
1632
1633                index = __mlx4_mpt_reserve(dev);
1634                if (index == -1) {
1635                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1636                        break;
1637                }
1638                id = index & mpt_mask(dev);
1639
1640                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1641                if (err) {
1642                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1643                        __mlx4_mpt_release(dev, index);
1644                        break;
1645                }
1646                set_param_l(out_param, index);
1647                break;
1648        case RES_OP_MAP_ICM:
1649                index = get_param_l(&in_param);
1650                id = index & mpt_mask(dev);
1651                err = mr_res_start_move_to(dev, slave, id,
1652                                           RES_MPT_MAPPED, &mpt);
1653                if (err)
1654                        return err;
1655
1656                err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1657                if (err) {
1658                        res_abort_move(dev, slave, RES_MPT, id);
1659                        return err;
1660                }
1661
1662                res_end_move(dev, slave, RES_MPT, id);
1663                break;
1664        }
1665        return err;
1666}
1667
1668static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1669                        u64 in_param, u64 *out_param)
1670{
1671        int cqn;
1672        int err;
1673
1674        switch (op) {
1675        case RES_OP_RESERVE_AND_MAP:
1676                err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1677                if (err)
1678                        break;
1679
1680                err = __mlx4_cq_alloc_icm(dev, &cqn);
1681                if (err) {
1682                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1683                        break;
1684                }
1685
1686                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1687                if (err) {
1688                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1689                        __mlx4_cq_free_icm(dev, cqn);
1690                        break;
1691                }
1692
1693                set_param_l(out_param, cqn);
1694                break;
1695
1696        default:
1697                err = -EINVAL;
1698        }
1699
1700        return err;
1701}
1702
1703static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1704                         u64 in_param, u64 *out_param)
1705{
1706        int srqn;
1707        int err;
1708
1709        switch (op) {
1710        case RES_OP_RESERVE_AND_MAP:
1711                err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1712                if (err)
1713                        break;
1714
1715                err = __mlx4_srq_alloc_icm(dev, &srqn);
1716                if (err) {
1717                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1718                        break;
1719                }
1720
1721                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1722                if (err) {
1723                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1724                        __mlx4_srq_free_icm(dev, srqn);
1725                        break;
1726                }
1727
1728                set_param_l(out_param, srqn);
1729                break;
1730
1731        default:
1732                err = -EINVAL;
1733        }
1734
1735        return err;
1736}
1737
1738static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1739                                     u8 smac_index, u64 *mac)
1740{
1741        struct mlx4_priv *priv = mlx4_priv(dev);
1742        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1743        struct list_head *mac_list =
1744                &tracker->slave_list[slave].res_list[RES_MAC];
1745        struct mac_res *res, *tmp;
1746
1747        list_for_each_entry_safe(res, tmp, mac_list, list) {
1748                if (res->smac_index == smac_index && res->port == (u8) port) {
1749                        *mac = res->mac;
1750                        return 0;
1751                }
1752        }
1753        return -ENOENT;
1754}
1755
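    /* Account a MAC registration to @slave.  Re-registering the same MAC
     * on the same port only bumps the existing entry's reference count;
     * a new entry is charged against the slave's MAC quota.
     */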
1756static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1757{
1758        struct mlx4_priv *priv = mlx4_priv(dev);
1759        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1760        struct list_head *mac_list =
1761                &tracker->slave_list[slave].res_list[RES_MAC];
1762        struct mac_res *res, *tmp;
1763
1764        list_for_each_entry_safe(res, tmp, mac_list, list) {
1765                if (res->mac == mac && res->port == (u8) port) {
1766                        /* MAC found; just bump the reference count */
1767                        ++res->ref_count;
1768                        return 0;
1769                }
1770        }
1771
1772        if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1773                return -EINVAL;
1774        res = kzalloc(sizeof(*res), GFP_KERNEL);
1775        if (!res) {
1776                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1777                return -ENOMEM;
1778        }
1779        res->mac = mac;
1780        res->port = (u8) port;
1781        res->smac_index = smac_index;
1782        res->ref_count = 1;
1783        list_add_tail(&res->list,
1784                      &tracker->slave_list[slave].res_list[RES_MAC]);
1785        return 0;
1786}
1787
1788static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1789                               int port)
1790{
1791        struct mlx4_priv *priv = mlx4_priv(dev);
1792        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1793        struct list_head *mac_list =
1794                &tracker->slave_list[slave].res_list[RES_MAC];
1795        struct mac_res *res, *tmp;
1796
1797        list_for_each_entry_safe(res, tmp, mac_list, list) {
1798                if (res->mac == mac && res->port == (u8) port) {
1799                        if (!--res->ref_count) {
1800                                list_del(&res->list);
1801                                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1802                                kfree(res);
1803                        }
1804                        break;
1805                }
1806        }
1807}
1808
1809static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1810{
1811        struct mlx4_priv *priv = mlx4_priv(dev);
1812        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1813        struct list_head *mac_list =
1814                &tracker->slave_list[slave].res_list[RES_MAC];
1815        struct mac_res *res, *tmp;
1816        int i;
1817
1818        list_for_each_entry_safe(res, tmp, mac_list, list) {
1819                list_del(&res->list);
1820                /* unregister the MAC once for each time the slave referenced it */
1821                for (i = 0; i < res->ref_count; i++)
1822                        __mlx4_unregister_mac(dev, res->port, res->mac);
1823                mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1824                kfree(res);
1825        }
1826}
1827
1828static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1829                         u64 in_param, u64 *out_param, int in_port)
1830{
1831        int err = -EINVAL;
1832        int port;
1833        u64 mac;
1834        u8 smac_index;
1835
1836        if (op != RES_OP_RESERVE_AND_MAP)
1837                return err;
1838
1839        port = !in_port ? get_param_l(out_param) : in_port;
1840        port = mlx4_slave_convert_port(dev, slave, port);
1842
1843        if (port < 0)
1844                return -EINVAL;
1845        mac = in_param;
1846
1847        err = __mlx4_register_mac(dev, port, mac);
1848        if (err >= 0) {
1849                smac_index = err;
1850                set_param_l(out_param, err);
1851                err = 0;
1852        }
1853
1854        if (!err) {
1855                err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1856                if (err)
1857                        __mlx4_unregister_mac(dev, port, mac);
1858        }
1859        return err;
1860}
1861
1862static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1863                             int port, int vlan_index)
1864{
1865        struct mlx4_priv *priv = mlx4_priv(dev);
1866        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1867        struct list_head *vlan_list =
1868                &tracker->slave_list[slave].res_list[RES_VLAN];
1869        struct vlan_res *res, *tmp;
1870
1871        list_for_each_entry_safe(res, tmp, vlan_list, list) {
1872                if (res->vlan == vlan && res->port == (u8) port) {
1873                        /* VLAN found; just bump the reference count */
1874                        ++res->ref_count;
1875                        return 0;
1876                }
1877        }
1878
1879        if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1880                return -EINVAL;
1881        res = kzalloc(sizeof(*res), GFP_KERNEL);
1882        if (!res) {
1883                mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1884                return -ENOMEM;
1885        }
1886        res->vlan = vlan;
1887        res->port = (u8) port;
1888        res->vlan_index = vlan_index;
1889        res->ref_count = 1;
1890        list_add_tail(&res->list,
1891                      &tracker->slave_list[slave].res_list[RES_VLAN]);
1892        return 0;
1893}
1894
1896static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1897                                int port)
1898{
1899        struct mlx4_priv *priv = mlx4_priv(dev);
1900        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1901        struct list_head *vlan_list =
1902                &tracker->slave_list[slave].res_list[RES_VLAN];
1903        struct vlan_res *res, *tmp;
1904
1905        list_for_each_entry_safe(res, tmp, vlan_list, list) {
1906                if (res->vlan == vlan && res->port == (u8) port) {
1907                        if (!--res->ref_count) {
1908                                list_del(&res->list);
1909                                mlx4_release_resource(dev, slave, RES_VLAN,
1910                                                      1, port);
1911                                kfree(res);
1912                        }
1913                        break;
1914                }
1915        }
1916}
1917
1918static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1919{
1920        struct mlx4_priv *priv = mlx4_priv(dev);
1921        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1922        struct list_head *vlan_list =
1923                &tracker->slave_list[slave].res_list[RES_VLAN];
1924        struct vlan_res *res, *tmp;
1925        int i;
1926
1927        list_for_each_entry_safe(res, tmp, vlan_list, list) {
1928                list_del(&res->list);
1929                /* unregister the VLAN once for each time the slave referenced it */
1930                for (i = 0; i < res->ref_count; i++)
1931                        __mlx4_unregister_vlan(dev, res->port, res->vlan);
1932                mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1933                kfree(res);
1934        }
1935}
1936
1937static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1938                          u64 in_param, u64 *out_param, int in_port)
1939{
1940        struct mlx4_priv *priv = mlx4_priv(dev);
1941        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1942        int err;
1943        u16 vlan;
1944        int vlan_index;
1945        int port;
1946
1947        port = !in_port ? get_param_l(out_param) : in_port;
1948
1949        if (!port || op != RES_OP_RESERVE_AND_MAP)
1950                return -EINVAL;
1951
1952        port = mlx4_slave_convert_port(dev, slave, port);
1954
1955        if (port < 0)
1956                return -EINVAL;
1957        /* upstream kernels had a NOP for reg/unreg vlan; preserve that behavior */
1958        if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1959                slave_state[slave].old_vlan_api = true;
1960                return 0;
1961        }
1962
1963        vlan = (u16) in_param;
1964
1965        err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1966        if (!err) {
1967                set_param_l(out_param, (u32) vlan_index);
1968                err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1969                if (err)
1970                        __mlx4_unregister_vlan(dev, port, vlan);
1971        }
1972        return err;
1973}
1974
1975static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1976                             u64 in_param, u64 *out_param)
1977{
1978        u32 index;
1979        int err;
1980
1981        if (op != RES_OP_RESERVE)
1982                return -EINVAL;
1983
1984        err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
1985        if (err)
1986                return err;
1987
1988        err = __mlx4_counter_alloc(dev, &index);
1989        if (err) {
1990                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1991                return err;
1992        }
1993
1994        err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
1995        if (err) {
1996                __mlx4_counter_free(dev, index);
1997                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
1998        } else {
1999                set_param_l(out_param, index);
2000        }
2001
2002        return err;
2003}
2004
2005static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2006                           u64 in_param, u64 *out_param)
2007{
2008        u32 xrcdn;
2009        int err;
2010
2011        if (op != RES_OP_RESERVE)
2012                return -EINVAL;
2013
2014        err = __mlx4_xrcd_alloc(dev, &xrcdn);
2015        if (err)
2016                return err;
2017
2018        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2019        if (err)
2020                __mlx4_xrcd_free(dev, xrcdn);
2021        else
2022                set_param_l(out_param, xrcdn);
2023
2024        return err;
2025}
2026
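    /* Dispatch an ALLOC_RES command issued by a slave: the resource type
     * is encoded in the low byte of in_modifier (bits 8-15 carry the port
     * for MAC/VLAN), and op_modifier selects the RES_OP_* action.
     */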
2027int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2028                           struct mlx4_vhcr *vhcr,
2029                           struct mlx4_cmd_mailbox *inbox,
2030                           struct mlx4_cmd_mailbox *outbox,
2031                           struct mlx4_cmd_info *cmd)
2032{
2033        int err;
2034        int alop = vhcr->op_modifier;
2035
2036        switch (vhcr->in_modifier & 0xFF) {
2037        case RES_QP:
2038                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2039                                   vhcr->in_param, &vhcr->out_param);
2040                break;
2041
2042        case RES_MTT:
2043                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2044                                    vhcr->in_param, &vhcr->out_param);
2045                break;
2046
2047        case RES_MPT:
2048                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2049                                    vhcr->in_param, &vhcr->out_param);
2050                break;
2051
2052        case RES_CQ:
2053                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2054                                   vhcr->in_param, &vhcr->out_param);
2055                break;
2056
2057        case RES_SRQ:
2058                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2059                                    vhcr->in_param, &vhcr->out_param);
2060                break;
2061
2062        case RES_MAC:
2063                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2064                                    vhcr->in_param, &vhcr->out_param,
2065                                    (vhcr->in_modifier >> 8) & 0xFF);
2066                break;
2067
2068        case RES_VLAN:
2069                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2070                                     vhcr->in_param, &vhcr->out_param,
2071                                     (vhcr->in_modifier >> 8) & 0xFF);
2072                break;
2073
2074        case RES_COUNTER:
2075                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2076                                        vhcr->in_param, &vhcr->out_param);
2077                break;
2078
2079        case RES_XRCD:
2080                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2081                                      vhcr->in_param, &vhcr->out_param);
2082                break;
2083
2084        default:
2085                err = -EINVAL;
2086                break;
2087        }
2088
2089        return err;
2090}
2091
2092static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2093                       u64 in_param)
2094{
2095        int err;
2096        int count;
2097        int base;
2098        int qpn;
2099
2100        switch (op) {
2101        case RES_OP_RESERVE:
2102                base = get_param_l(&in_param) & 0x7fffff;
2103                count = get_param_h(&in_param);
2104                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2105                if (err)
2106                        break;
2107                mlx4_release_resource(dev, slave, RES_QP, count, 0);
2108                __mlx4_qp_release_range(dev, base, count);
2109                break;
2110        case RES_OP_MAP_ICM:
2111                qpn = get_param_l(&in_param) & 0x7fffff;
2112                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2113                                           NULL, 0);
2114                if (err)
2115                        return err;
2116
2117                if (!fw_reserved(dev, qpn))
2118                        __mlx4_qp_free_icm(dev, qpn);
2119
2120                res_end_move(dev, slave, RES_QP, qpn);
2121
2122                if (valid_reserved(dev, slave, qpn))
2123                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2124                break;
2125        default:
2126                err = -EINVAL;
2127                break;
2128        }
2129        return err;
2130}
2131
2132static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2133                        u64 in_param, u64 *out_param)
2134{
2135        int err = -EINVAL;
2136        int base;
2137        int order;
2138
2139        if (op != RES_OP_RESERVE_AND_MAP)
2140                return err;
2141
2142        base = get_param_l(&in_param);
2143        order = get_param_h(&in_param);
2144        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2145        if (!err) {
2146                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2147                __mlx4_free_mtt_range(dev, base, order);
2148        }
2149        return err;
2150}
2151
2152static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2153                        u64 in_param)
2154{
2155        int err = -EINVAL;
2156        int index;
2157        int id;
2158        struct res_mpt *mpt;
2159
2160        switch (op) {
2161        case RES_OP_RESERVE:
2162                index = get_param_l(&in_param);
2163                id = index & mpt_mask(dev);
2164                err = get_res(dev, slave, id, RES_MPT, &mpt);
2165                if (err)
2166                        break;
2167                index = mpt->key;
2168                put_res(dev, slave, id, RES_MPT);
2169
2170                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2171                if (err)
2172                        break;
2173                mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2174                __mlx4_mpt_release(dev, index);
2175                break;
2176        case RES_OP_MAP_ICM:
2177                index = get_param_l(&in_param);
2178                id = index & mpt_mask(dev);
2179                err = mr_res_start_move_to(dev, slave, id,
2180                                           RES_MPT_RESERVED, &mpt);
2181                if (err)
2182                        return err;
2183
2184                __mlx4_mpt_free_icm(dev, mpt->key);
2185                res_end_move(dev, slave, RES_MPT, id);
2186                return err;
2188        default:
2189                err = -EINVAL;
2190                break;
2191        }
2192        return err;
2193}
2194
2195static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2196                       u64 in_param, u64 *out_param)
2197{
2198        int cqn;
2199        int err;
2200
2201        switch (op) {
2202        case RES_OP_RESERVE_AND_MAP:
2203                cqn = get_param_l(&in_param);
2204                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2205                if (err)
2206                        break;
2207
2208                mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2209                __mlx4_cq_free_icm(dev, cqn);
2210                break;
2211
2212        default:
2213                err = -EINVAL;
2214                break;
2215        }
2216
2217        return err;
2218}
2219
2220static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2221                        u64 in_param, u64 *out_param)
2222{
2223        int srqn;
2224        int err;
2225
2226        switch (op) {
2227        case RES_OP_RESERVE_AND_MAP:
2228                srqn = get_param_l(&in_param);
2229                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2230                if (err)
2231                        break;
2232
2233                mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2234                __mlx4_srq_free_icm(dev, srqn);
2235                break;
2236
2237        default:
2238                err = -EINVAL;
2239                break;
2240        }
2241
2242        return err;
2243}
2244
2245static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2246                            u64 in_param, u64 *out_param, int in_port)
2247{
2248        int port;
2249        int err = 0;
2250
2251        switch (op) {
2252        case RES_OP_RESERVE_AND_MAP:
2253                port = !in_port ? get_param_l(out_param) : in_port;
2254                port = mlx4_slave_convert_port(
2255                                dev, slave, port);
2256
2257                if (port < 0)
2258                        return -EINVAL;
2259                mac_del_from_slave(dev, slave, in_param, port);
2260                __mlx4_unregister_mac(dev, port, in_param);
2261                break;
2262        default:
2263                err = -EINVAL;
2264                break;
2265        }
2266
2267        return err;
2269}
2270
2271static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2272                            u64 in_param, u64 *out_param, int port)
2273{
2274        struct mlx4_priv *priv = mlx4_priv(dev);
2275        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2276        int err = 0;
2277
2278        port = mlx4_slave_convert_port(
2279                        dev, slave, port);
2280
2281        if (port < 0)
2282                return -EINVAL;
2283        switch (op) {
2284        case RES_OP_RESERVE_AND_MAP:
2285                if (slave_state[slave].old_vlan_api)
2286                        return 0;
2287                if (!port)
2288                        return -EINVAL;
2289                vlan_del_from_slave(dev, slave, in_param, port);
2290                __mlx4_unregister_vlan(dev, port, in_param);
2291                break;
2292        default:
2293                err = -EINVAL;
2294                break;
2295        }
2296
2297        return err;
2298}
2299
2300static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2301                            u64 in_param, u64 *out_param)
2302{
2303        int index;
2304        int err;
2305
2306        if (op != RES_OP_RESERVE)
2307                return -EINVAL;
2308
2309        index = get_param_l(&in_param);
2310        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2311        if (err)
2312                return err;
2313
2314        __mlx4_counter_free(dev, index);
2315        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2316
2317        return err;
2318}
2319
2320static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2321                          u64 in_param, u64 *out_param)
2322{
2323        int xrcdn;
2324        int err;
2325
2326        if (op != RES_OP_RESERVE)
2327                return -EINVAL;
2328
2329        xrcdn = get_param_l(&in_param);
2330        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2331        if (err)
2332                return err;
2333
2334        __mlx4_xrcd_free(dev, xrcdn);
2335
2336        return err;
2337}
2338
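    /* Mirror image of mlx4_ALLOC_RES_wrapper(): same in_modifier
     * encoding, with each helper releasing quota and tracker state in
     * the reverse order of allocation.
     */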
2339int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2340                          struct mlx4_vhcr *vhcr,
2341                          struct mlx4_cmd_mailbox *inbox,
2342                          struct mlx4_cmd_mailbox *outbox,
2343                          struct mlx4_cmd_info *cmd)
2344{
2345        int err = -EINVAL;
2346        int alop = vhcr->op_modifier;
2347
2348        switch (vhcr->in_modifier & 0xFF) {
2349        case RES_QP:
2350                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2351                                  vhcr->in_param);
2352                break;
2353
2354        case RES_MTT:
2355                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2356                                   vhcr->in_param, &vhcr->out_param);
2357                break;
2358
2359        case RES_MPT:
2360                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2361                                   vhcr->in_param);
2362                break;
2363
2364        case RES_CQ:
2365                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2366                                  vhcr->in_param, &vhcr->out_param);
2367                break;
2368
2369        case RES_SRQ:
2370                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2371                                   vhcr->in_param, &vhcr->out_param);
2372                break;
2373
2374        case RES_MAC:
2375                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2376                                   vhcr->in_param, &vhcr->out_param,
2377                                   (vhcr->in_modifier >> 8) & 0xFF);
2378                break;
2379
2380        case RES_VLAN:
2381                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2382                                    vhcr->in_param, &vhcr->out_param,
2383                                    (vhcr->in_modifier >> 8) & 0xFF);
2384                break;
2385
2386        case RES_COUNTER:
2387                err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2388                                       vhcr->in_param, &vhcr->out_param);
2389                break;
2390
2391        case RES_XRCD:
2392                err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2393                                     vhcr->in_param, &vhcr->out_param);
2394                break;
2395        default:
2396                break;
2397        }
2398        return err;
2399}
2400
2401/* ugly but other choices are uglier */
2402static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2403{
2404        return (be32_to_cpu(mpt->flags) >> 9) & 1;
2405}
2406
2407static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2408{
2409        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2410}
2411
2412static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2413{
2414        return be32_to_cpu(mpt->mtt_sz);
2415}
2416
2417static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2418{
2419        return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2420}
2421
2422static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2423{
2424        return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2425}
2426
2427static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2428{
2429        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2430}
2431
2432static int mr_is_region(struct mlx4_mpt_entry *mpt)
2433{
2434        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2435}
2436
2437static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2438{
2439        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2440}
2441
2442static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2443{
2444        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2445}
2446
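    /* Compute how many MTT pages a QP context implies: SQ and RQ sizes
     * come from the log size/stride fields (the RQ is absent for SRQ,
     * RSS and XRC QPs), and the total, including the page offset, is
     * rounded up to a power-of-two number of pages.
     */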
2447static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2448{
2449        int page_shift = (qpc->log_page_size & 0x3f) + 12;
2450        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2451        int log_sq_stride = qpc->sq_size_stride & 7;
2452        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2453        int log_rq_stride = qpc->rq_size_stride & 7;
2454        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2455        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2456        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2457        int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2458        int sq_size;
2459        int rq_size;
2460        int total_pages;
2461        int total_mem;
2462        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2463
2464        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2465        rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2466        total_mem = sq_size + rq_size;
2467        total_pages =
2468                roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2469                                   page_shift);
2470
2471        return total_pages;
2472}
2473
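    /* Reject MTT ranges that are not fully contained in the MTT
     * allocation the slave actually owns.
     */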
2474static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2475                           int size, struct res_mtt *mtt)
2476{
2477        int res_start = mtt->com.res_id;
2478        int res_size = (1 << mtt->order);
2479
2480        if (start < res_start || start + size > res_start + res_size)
2481                return -EPERM;
2482        return 0;
2483}
2484
2485int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2486                           struct mlx4_vhcr *vhcr,
2487                           struct mlx4_cmd_mailbox *inbox,
2488                           struct mlx4_cmd_mailbox *outbox,
2489                           struct mlx4_cmd_info *cmd)
2490{
2491        int err;
2492        int index = vhcr->in_modifier;
2493        struct res_mtt *mtt;
2494        struct res_mpt *mpt;
2495        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2496        int phys;
2497        int id;
2498        u32 pd;
2499        int pd_slave;
2500
2501        id = index & mpt_mask(dev);
2502        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2503        if (err)
2504                return err;
2505
2506        /* Disable memory windows for VFs. */
2507        if (!mr_is_region(inbox->buf)) {
2508                err = -EPERM;
2509                goto ex_abort;
2510        }
2511
2512        /* Make sure that the PD bits related to the slave id are zeros. */
2513        pd = mr_get_pd(inbox->buf);
2514        pd_slave = (pd >> 17) & 0x7f;
2515        if (pd_slave != 0 && pd_slave != slave) {
2516                err = -EPERM;
2517                goto ex_abort;
2518        }
2519
2520        if (mr_is_fmr(inbox->buf)) {
2521                /* FMR and Bind Enable are forbidden in slave devices. */
2522                if (mr_is_bind_enabled(inbox->buf)) {
2523                        err = -EPERM;
2524                        goto ex_abort;
2525                }
2526                /* FMR and Memory Windows are also forbidden. */
2527                if (!mr_is_region(inbox->buf)) {
2528                        err = -EPERM;
2529                        goto ex_abort;
2530                }
2531        }
2532
2533        phys = mr_phys_mpt(inbox->buf);
2534        if (!phys) {
2535                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2536                if (err)
2537                        goto ex_abort;
2538
2539                err = check_mtt_range(dev, slave, mtt_base,
2540                                      mr_get_mtt_size(inbox->buf), mtt);
2541                if (err)
2542                        goto ex_put;
2543
2544                mpt->mtt = mtt;
2545        }
2546
2547        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2548        if (err)
2549                goto ex_put;
2550
2551        if (!phys) {
2552                atomic_inc(&mtt->ref_count);
2553                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2554        }
2555
2556        res_end_move(dev, slave, RES_MPT, id);
2557        return 0;
2558
2559ex_put:
2560        if (!phys)
2561                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2562ex_abort:
2563        res_abort_move(dev, slave, RES_MPT, id);
2564
2565        return err;
2566}
2567
2568int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2569                           struct mlx4_vhcr *vhcr,
2570                           struct mlx4_cmd_mailbox *inbox,
2571                           struct mlx4_cmd_mailbox *outbox,
2572                           struct mlx4_cmd_info *cmd)
2573{
2574        int err;
2575        int index = vhcr->in_modifier;
2576        struct res_mpt *mpt;
2577        int id;
2578
2579        id = index & mpt_mask(dev);
2580        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2581        if (err)
2582                return err;
2583
2584        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2585        if (err)
2586                goto ex_abort;
2587
2588        if (mpt->mtt)
2589                atomic_dec(&mpt->mtt->ref_count);
2590
2591        res_end_move(dev, slave, RES_MPT, id);
2592        return 0;
2593
2594ex_abort:
2595        res_abort_move(dev, slave, RES_MPT, id);
2596
2597        return err;
2598}
2599
2600int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2601                           struct mlx4_vhcr *vhcr,
2602                           struct mlx4_cmd_mailbox *inbox,
2603                           struct mlx4_cmd_mailbox *outbox,
2604                           struct mlx4_cmd_info *cmd)
2605{
2606        int err;
2607        int index = vhcr->in_modifier;
2608        struct res_mpt *mpt;
2609        int id;
2610
2611        id = index & mpt_mask(dev);
2612        err = get_res(dev, slave, id, RES_MPT, &mpt);
2613        if (err)
2614                return err;
2615
2616        if (mpt->com.from_state != RES_MPT_HW) {
2617                err = -EBUSY;
2618                goto out;
2619        }
2620
2621        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2622
2623out:
2624        put_res(dev, slave, id, RES_MPT);
2625        return err;
2626}
2627
2628static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2629{
2630        return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2631}
2632
2633static int qp_get_scqn(struct mlx4_qp_context *qpc)
2634{
2635        return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2636}
2637
2638static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2639{
2640        return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2641}
2642
2643static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2644                                  struct mlx4_qp_context *context)
2645{
2646        u32 qpn = vhcr->in_modifier & 0xffffff;
2647        u32 qkey = 0;
2648
2649        if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2650                return;
2651
2652        /* adjust qkey in qp context */
2653        context->qkey = cpu_to_be32(qkey);
2654}
2655
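    /* RST2INIT moves a QP into HW ownership.  Every object the QP
     * context references (MTT range, receive/send CQs and optionally an
     * SRQ) is looked up, validated against this slave and pinned with a
     * reference before the command is forwarded to FW.
     */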
2656int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2657                             struct mlx4_vhcr *vhcr,
2658                             struct mlx4_cmd_mailbox *inbox,
2659                             struct mlx4_cmd_mailbox *outbox,
2660                             struct mlx4_cmd_info *cmd)
2661{
2662        int err;
2663        int qpn = vhcr->in_modifier & 0x7fffff;
2664        struct res_mtt *mtt;
2665        struct res_qp *qp;
2666        struct mlx4_qp_context *qpc = inbox->buf + 8;
2667        int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2668        int mtt_size = qp_get_mtt_size(qpc);
2669        struct res_cq *rcq;
2670        struct res_cq *scq;
2671        int rcqn = qp_get_rcqn(qpc);
2672        int scqn = qp_get_scqn(qpc);
2673        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2674        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2675        struct res_srq *srq;
2676        int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2677
2678        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2679        if (err)
2680                return err;
2681        qp->local_qpn = local_qpn;
2682        qp->sched_queue = 0;
2683        qp->param3 = 0;
2684        qp->vlan_control = 0;
2685        qp->fvl_rx = 0;
2686        qp->pri_path_fl = 0;
2687        qp->vlan_index = 0;
2688        qp->feup = 0;
2689        qp->qpc_flags = be32_to_cpu(qpc->flags);
2690
2691        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2692        if (err)
2693                goto ex_abort;
2694
2695        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2696        if (err)
2697                goto ex_put_mtt;
2698
2699        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2700        if (err)
2701                goto ex_put_mtt;
2702
2703        if (scqn != rcqn) {
2704                err = get_res(dev, slave, scqn, RES_CQ, &scq);
2705                if (err)
2706                        goto ex_put_rcq;
2707        } else
2708                scq = rcq;
2709
2710        if (use_srq) {
2711                err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2712                if (err)
2713                        goto ex_put_scq;
2714        }
2715
2716        adjust_proxy_tun_qkey(dev, vhcr, qpc);
2717        update_pkey_index(dev, slave, inbox);
2718        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2719        if (err)
2720                goto ex_put_srq;
2721        atomic_inc(&mtt->ref_count);
2722        qp->mtt = mtt;
2723        atomic_inc(&rcq->ref_count);
2724        qp->rcq = rcq;
2725        atomic_inc(&scq->ref_count);
2726        qp->scq = scq;
2727
2728        if (scqn != rcqn)
2729                put_res(dev, slave, scqn, RES_CQ);
2730
2731        if (use_srq) {
2732                atomic_inc(&srq->ref_count);
2733                put_res(dev, slave, srqn, RES_SRQ);
2734                qp->srq = srq;
2735        }
2736        put_res(dev, slave, rcqn, RES_CQ);
2737        put_res(dev, slave, mtt_base, RES_MTT);
2738        res_end_move(dev, slave, RES_QP, qpn);
2739
2740        return 0;
2741
2742ex_put_srq:
2743        if (use_srq)
2744                put_res(dev, slave, srqn, RES_SRQ);
2745ex_put_scq:
2746        if (scqn != rcqn)
2747                put_res(dev, slave, scqn, RES_CQ);
2748ex_put_rcq:
2749        put_res(dev, slave, rcqn, RES_CQ);
2750ex_put_mtt:
2751        put_res(dev, slave, mtt_base, RES_MTT);
2752ex_abort:
2753        res_abort_move(dev, slave, RES_QP, qpn);
2754
2755        return err;
2756}
2757
2758static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2759{
2760        return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2761}
2762
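    /* EQEs are 32 bytes, so an EQ with 2^log_eq_size entries occupies
     * 2^(log_eq_size + 5) bytes; convert that to 2^page_shift sized
     * pages.  The same arithmetic is used for CQs below.
     */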
2763static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2764{
2765        int log_eq_size = eqc->log_eq_size & 0x1f;
2766        int page_shift = (eqc->log_page_size & 0x3f) + 12;
2767
2768        if (log_eq_size + 5 < page_shift)
2769                return 1;
2770
2771        return 1 << (log_eq_size + 5 - page_shift);
2772}
2773
2774static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2775{
2776        return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2777}
2778
2779static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2780{
2781        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2782        int page_shift = (cqc->log_page_size & 0x3f) + 12;
2783
2784        if (log_cq_size + 5 < page_shift)
2785                return 1;
2786
2787        return 1 << (log_cq_size + 5 - page_shift);
2788}
2789
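    /* EQs are tracked per slave: the tracker id is (slave << 8) | eqn,
     * and the entry is created here on SW2HW and removed again on HW2SW.
     */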
2790int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2791                          struct mlx4_vhcr *vhcr,
2792                          struct mlx4_cmd_mailbox *inbox,
2793                          struct mlx4_cmd_mailbox *outbox,
2794                          struct mlx4_cmd_info *cmd)
2795{
2796        int err;
2797        int eqn = vhcr->in_modifier;
2798        int res_id = (slave << 8) | eqn;
2799        struct mlx4_eq_context *eqc = inbox->buf;
2800        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2801        int mtt_size = eq_get_mtt_size(eqc);
2802        struct res_eq *eq;
2803        struct res_mtt *mtt;
2804
2805        err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2806        if (err)
2807                return err;
2808        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2809        if (err)
2810                goto out_add;
2811
2812        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2813        if (err)
2814                goto out_move;
2815
2816        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2817        if (err)
2818                goto out_put;
2819
2820        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2821        if (err)
2822                goto out_put;
2823
2824        atomic_inc(&mtt->ref_count);
2825        eq->mtt = mtt;
2826        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2827        res_end_move(dev, slave, RES_EQ, res_id);
2828        return 0;
2829
2830out_put:
2831        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2832out_move:
2833        res_abort_move(dev, slave, RES_EQ, res_id);
2834out_add:
2835        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2836        return err;
2837}
2838
2839static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2840                              int len, struct res_mtt **res)
2841{
2842        struct mlx4_priv *priv = mlx4_priv(dev);
2843        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2844        struct res_mtt *mtt;
2845        int err = -EINVAL;
2846
2847        spin_lock_irq(mlx4_tlock(dev));
2848        list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2849                            com.list) {
2850                if (!check_mtt_range(dev, slave, start, len, mtt)) {
2851                        *res = mtt;
2852                        mtt->com.from_state = mtt->com.state;
2853                        mtt->com.state = RES_MTT_BUSY;
2854                        err = 0;
2855                        break;
2856                }
2857        }
2858        spin_unlock_irq(mlx4_tlock(dev));
2859
2860        return err;
2861}
2862
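    /* Sanity-check a slave-requested QP transition: mgid_index must stay
     * within the number of GIDs assigned to the slave on that port, and
     * MLX proxy special QPs may only be created by VFs with SMI enabled.
     */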
2863static int verify_qp_parameters(struct mlx4_dev *dev,
2864                                struct mlx4_vhcr *vhcr,
2865                                struct mlx4_cmd_mailbox *inbox,
2866                                enum qp_transition transition, u8 slave)
2867{
2868        u32                     qp_type;
2869        u32                     qpn;
2870        struct mlx4_qp_context  *qp_ctx;
2871        enum mlx4_qp_optpar     optpar;
2872        int port;
2873        int num_gids;
2874
2875        qp_ctx  = inbox->buf + 8;
2876        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2877        optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2878
2879        switch (qp_type) {
2880        case MLX4_QP_ST_RC:
2881        case MLX4_QP_ST_XRC:
2882        case MLX4_QP_ST_UC:
2883                switch (transition) {
2884                case QP_TRANS_INIT2RTR:
2885                case QP_TRANS_RTR2RTS:
2886                case QP_TRANS_RTS2RTS:
2887                case QP_TRANS_SQD2SQD:
2888                case QP_TRANS_SQD2RTS:
2889                        if (slave != mlx4_master_func_num(dev)) {
2890                                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2891                                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2892                                        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2893                                                num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2894                                        else
2895                                                num_gids = 1;
2896                                        if (qp_ctx->pri_path.mgid_index >= num_gids)
2897                                                return -EINVAL;
2898                                }
2899                                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2900                                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2901                                        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2902                                                num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2903                                        else
2904                                                num_gids = 1;
2905                                        if (qp_ctx->alt_path.mgid_index >= num_gids)
2906                                                return -EINVAL;
2907                                }
                        }
2908                        break;
2909                default:
2910                        break;
2911                }
2912                break;
2913
2914        case MLX4_QP_ST_MLX:
2915                qpn = vhcr->in_modifier & 0x7fffff;
2916                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2917                if (transition == QP_TRANS_INIT2RTR &&
2918                    slave != mlx4_master_func_num(dev) &&
2919                    mlx4_is_qp_reserved(dev, qpn) &&
2920                    !mlx4_vf_smi_enabled(dev, slave, port)) {
2921                        /* only enabled VFs may create MLX proxy QPs */
2922                        mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2923                                 __func__, slave, port);
2924                        return -EPERM;
2925                }
2926                break;
2927
2928        default:
2929                break;
2930        }
2931
2932        return 0;
2933}
2934
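    /* WRITE_MTT inbox layout: page_list[0] holds the starting MTT index,
     * and the page addresses to write begin at page_list[2].
     */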
2935int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2936                           struct mlx4_vhcr *vhcr,
2937                           struct mlx4_cmd_mailbox *inbox,
2938                           struct mlx4_cmd_mailbox *outbox,
2939                           struct mlx4_cmd_info *cmd)
2940{
2941        struct mlx4_mtt mtt;
2942        __be64 *page_list = inbox->buf;
2943        u64 *pg_list = (u64 *)page_list;
2944        int i;
2945        struct res_mtt *rmtt = NULL;
2946        int start = be64_to_cpu(page_list[0]);
2947        int npages = vhcr->in_modifier;
2948        int err;
2949
2950        err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2951        if (err)
2952                return err;
2953
2954        /* Call the SW implementation of write_mtt:
2955         * - Prepare a dummy mtt struct
2956         * - Translate inbox contents to simple addresses in host endianness */
2957        mtt.offset = 0;  /* TBD: offset handling is not implemented, but the
2958                            offset is never used here */
2959        mtt.order = 0;
2960        mtt.page_shift = 0;
2961        for (i = 0; i < npages; ++i)
2962                pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2963
2964        err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2965                               ((u64 *)page_list + 2));
2966
2967        if (rmtt)
2968                put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2969
2970        return err;
2971}
2972
2973int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2974                          struct mlx4_vhcr *vhcr,
2975                          struct mlx4_cmd_mailbox *inbox,
2976                          struct mlx4_cmd_mailbox *outbox,
2977                          struct mlx4_cmd_info *cmd)
2978{
2979        int eqn = vhcr->in_modifier;
2980        int res_id = eqn | (slave << 8);
2981        struct res_eq *eq;
2982        int err;
2983
2984        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2985        if (err)
2986                return err;
2987
2988        err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2989        if (err)
2990                goto ex_abort;
2991
2992        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2993        if (err)
2994                goto ex_put;
2995
2996        atomic_dec(&eq->mtt->ref_count);
2997        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2998        res_end_move(dev, slave, RES_EQ, res_id);
2999        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3000
3001        return 0;
3002
3003ex_put:
3004        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3005ex_abort:
3006        res_abort_move(dev, slave, RES_EQ, res_id);
3007
3008        return err;
3009}
3010
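    /* Deliver an event to a slave by injecting an EQE into the EQ the
     * slave registered for this event type, using the GEN_EQE command.
     */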
3011int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3012{
3013        struct mlx4_priv *priv = mlx4_priv(dev);
3014        struct mlx4_slave_event_eq_info *event_eq;
3015        struct mlx4_cmd_mailbox *mailbox;
3016        u32 in_modifier = 0;
3017        int err;
3018        int res_id;
3019        struct res_eq *req;
3020
3021        if (!priv->mfunc.master.slave_state)
3022                return -EINVAL;
3023
3024        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3025
3026        /* Create the event only if the slave is registered */
3027        if (event_eq->eqn < 0)
3028                return 0;
3029
3030        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3031        res_id = (slave << 8) | event_eq->eqn;
3032        err = get_res(dev, slave, res_id, RES_EQ, &req);
3033        if (err)
3034                goto unlock;
3035
3036        if (req->com.from_state != RES_EQ_HW) {
3037                err = -EINVAL;
3038                goto put;
3039        }
3040
3041        mailbox = mlx4_alloc_cmd_mailbox(dev);
3042        if (IS_ERR(mailbox)) {
3043                err = PTR_ERR(mailbox);
3044                goto put;
3045        }
3046
3047        if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3048                ++event_eq->token;
3049                eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3050        }
3051
3052        memcpy(mailbox->buf, (u8 *) eqe, 28);
3053
3054        in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3055
3056        err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3057                       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3058                       MLX4_CMD_NATIVE);
3059
3060        put_res(dev, slave, res_id, RES_EQ);
3061        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3062        mlx4_free_cmd_mailbox(dev, mailbox);
3063        return err;
3064
3065put:
3066        put_res(dev, slave, res_id, RES_EQ);
3067
3068unlock:
3069        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3070        return err;
3071}
3072
3073int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3074                          struct mlx4_vhcr *vhcr,
3075                          struct mlx4_cmd_mailbox *inbox,
3076                          struct mlx4_cmd_mailbox *outbox,
3077                          struct mlx4_cmd_info *cmd)
3078{
3079        int eqn = vhcr->in_modifier;
3080        int res_id = eqn | (slave << 8);
3081        struct res_eq *eq;
3082        int err;
3083
3084        err = get_res(dev, slave, res_id, RES_EQ, &eq);
3085        if (err)
3086                return err;
3087
3088        if (eq->com.from_state != RES_EQ_HW) {
3089                err = -EINVAL;
3090                goto ex_put;
3091        }
3092
3093        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3094
3095ex_put:
3096        put_res(dev, slave, res_id, RES_EQ);
3097        return err;
3098}
3099
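/*
 * The CQ wrappers keep MTT reference counting balanced: SW2HW_CQ takes a
 * reference on the MTT backing the CQ (atomic_inc on mtt->ref_count) and
 * HW2SW_CQ drops it, so the MTT cannot be released while the CQ is in
 * hardware ownership.  The SRQ and QP wrappers follow the same pattern.
 */
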
3100int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3101                          struct mlx4_vhcr *vhcr,
3102                          struct mlx4_cmd_mailbox *inbox,
3103                          struct mlx4_cmd_mailbox *outbox,
3104                          struct mlx4_cmd_info *cmd)
3105{
3106        int err;
3107        int cqn = vhcr->in_modifier;
3108        struct mlx4_cq_context *cqc = inbox->buf;
3109        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3110        struct res_cq *cq;
3111        struct res_mtt *mtt;
3112
3113        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3114        if (err)
3115                return err;
3116        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3117        if (err)
3118                goto out_move;
3119        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3120        if (err)
3121                goto out_put;
3122        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3123        if (err)
3124                goto out_put;
3125        atomic_inc(&mtt->ref_count);
3126        cq->mtt = mtt;
3127        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3128        res_end_move(dev, slave, RES_CQ, cqn);
3129        return 0;
3130
3131out_put:
3132        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3133out_move:
3134        res_abort_move(dev, slave, RES_CQ, cqn);
3135        return err;
3136}
3137
3138int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3139                          struct mlx4_vhcr *vhcr,
3140                          struct mlx4_cmd_mailbox *inbox,
3141                          struct mlx4_cmd_mailbox *outbox,
3142                          struct mlx4_cmd_info *cmd)
3143{
3144        int err;
3145        int cqn = vhcr->in_modifier;
3146        struct res_cq *cq;
3147
3148        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3149        if (err)
3150                return err;
3151        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3152        if (err)
3153                goto out_move;
3154        atomic_dec(&cq->mtt->ref_count);
3155        res_end_move(dev, slave, RES_CQ, cqn);
3156        return 0;
3157
3158out_move:
3159        res_abort_move(dev, slave, RES_CQ, cqn);
3160        return err;
3161}
3162
3163int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3164                          struct mlx4_vhcr *vhcr,
3165                          struct mlx4_cmd_mailbox *inbox,
3166                          struct mlx4_cmd_mailbox *outbox,
3167                          struct mlx4_cmd_info *cmd)
3168{
3169        int cqn = vhcr->in_modifier;
3170        struct res_cq *cq;
3171        int err;
3172
3173        err = get_res(dev, slave, cqn, RES_CQ, &cq);
3174        if (err)
3175                return err;
3176
3177        if (cq->com.from_state != RES_CQ_HW)
3178                goto ex_put;
3179
3180        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3181ex_put:
3182        put_res(dev, slave, cqn, RES_CQ);
3183
3184        return err;
3185}
3186
3187static int handle_resize(struct mlx4_dev *dev, int slave,
3188                         struct mlx4_vhcr *vhcr,
3189                         struct mlx4_cmd_mailbox *inbox,
3190                         struct mlx4_cmd_mailbox *outbox,
3191                         struct mlx4_cmd_info *cmd,
3192                         struct res_cq *cq)
3193{
3194        int err;
3195        struct res_mtt *orig_mtt;
3196        struct res_mtt *mtt;
3197        struct mlx4_cq_context *cqc = inbox->buf;
3198        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3199
3200        err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3201        if (err)
3202                return err;
3203
3204        if (orig_mtt != cq->mtt) {
3205                err = -EINVAL;
3206                goto ex_put;
3207        }
3208
3209        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3210        if (err)
3211                goto ex_put;
3212
3213        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3214        if (err)
3215                goto ex_put1;
3216        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3217        if (err)
3218                goto ex_put1;
3219        atomic_dec(&orig_mtt->ref_count);
3220        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3221        atomic_inc(&mtt->ref_count);
3222        cq->mtt = mtt;
3223        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3224        return 0;
3225
3226ex_put1:
3227        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3228ex_put:
3229        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3230
3231        return err;
3232
3233}
3234
3235int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3236                           struct mlx4_vhcr *vhcr,
3237                           struct mlx4_cmd_mailbox *inbox,
3238                           struct mlx4_cmd_mailbox *outbox,
3239                           struct mlx4_cmd_info *cmd)
3240{
3241        int cqn = vhcr->in_modifier;
3242        struct res_cq *cq;
3243        int err;
3244
3245        err = get_res(dev, slave, cqn, RES_CQ, &cq);
3246        if (err)
3247                return err;
3248
3249        if (cq->com.from_state != RES_CQ_HW)
3250                goto ex_put;
3251
3252        if (vhcr->op_modifier == 0) {
3253                err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3254                goto ex_put;
3255        }
3256
3257        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3258ex_put:
3259        put_res(dev, slave, cqn, RES_CQ);
3260
3261        return err;
3262}
3263
3264static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3265{
3266        int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3267        int log_rq_stride = srqc->logstride & 7;
3268        int page_shift = (srqc->log_page_size & 0x3f) + 12;
3269
3270        if (log_srq_size + log_rq_stride + 4 < page_shift)
3271                return 1;
3272
3273        return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3274}
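
/*
 * Worked example for srq_get_mtt_size(): an SRQ holds 2^log_srq_size WQEs
 * of 2^(log_rq_stride + 4) bytes each.  With log_srq_size = 10,
 * log_rq_stride = 2 and 4 KB pages (page_shift = 12), the buffer spans
 * 1 << (10 + 2 + 4 - 12) = 16 pages, i.e. 16 MTT entries.
 */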
3275
3276int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3277                           struct mlx4_vhcr *vhcr,
3278                           struct mlx4_cmd_mailbox *inbox,
3279                           struct mlx4_cmd_mailbox *outbox,
3280                           struct mlx4_cmd_info *cmd)
3281{
3282        int err;
3283        int srqn = vhcr->in_modifier;
3284        struct res_mtt *mtt;
3285        struct res_srq *srq;
3286        struct mlx4_srq_context *srqc = inbox->buf;
3287        int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3288
3289        if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3290                return -EINVAL;
3291
3292        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3293        if (err)
3294                return err;
3295        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3296        if (err)
3297                goto ex_abort;
3298        err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3299                              mtt);
3300        if (err)
3301                goto ex_put_mtt;
3302
3303        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3304        if (err)
3305                goto ex_put_mtt;
3306
3307        atomic_inc(&mtt->ref_count);
3308        srq->mtt = mtt;
3309        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3310        res_end_move(dev, slave, RES_SRQ, srqn);
3311        return 0;
3312
3313ex_put_mtt:
3314        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3315ex_abort:
3316        res_abort_move(dev, slave, RES_SRQ, srqn);
3317
3318        return err;
3319}
3320
3321int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3322                           struct mlx4_vhcr *vhcr,
3323                           struct mlx4_cmd_mailbox *inbox,
3324                           struct mlx4_cmd_mailbox *outbox,
3325                           struct mlx4_cmd_info *cmd)
3326{
3327        int err;
3328        int srqn = vhcr->in_modifier;
3329        struct res_srq *srq;
3330
3331        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3332        if (err)
3333                return err;
3334        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3335        if (err)
3336                goto ex_abort;
3337        atomic_dec(&srq->mtt->ref_count);
3338        if (srq->cq)
3339                atomic_dec(&srq->cq->ref_count);
3340        res_end_move(dev, slave, RES_SRQ, srqn);
3341
3342        return 0;
3343
3344ex_abort:
3345        res_abort_move(dev, slave, RES_SRQ, srqn);
3346
3347        return err;
3348}
3349
3350int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3351                           struct mlx4_vhcr *vhcr,
3352                           struct mlx4_cmd_mailbox *inbox,
3353                           struct mlx4_cmd_mailbox *outbox,
3354                           struct mlx4_cmd_info *cmd)
3355{
3356        int err;
3357        int srqn = vhcr->in_modifier;
3358        struct res_srq *srq;
3359
3360        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3361        if (err)
3362                return err;
3363        if (srq->com.from_state != RES_SRQ_HW) {
3364                err = -EBUSY;
3365                goto out;
3366        }
3367        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3368out:
3369        put_res(dev, slave, srqn, RES_SRQ);
3370        return err;
3371}
3372
3373int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3374                         struct mlx4_vhcr *vhcr,
3375                         struct mlx4_cmd_mailbox *inbox,
3376                         struct mlx4_cmd_mailbox *outbox,
3377                         struct mlx4_cmd_info *cmd)
3378{
3379        int err;
3380        int srqn = vhcr->in_modifier;
3381        struct res_srq *srq;
3382
3383        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3384        if (err)
3385                return err;
3386
3387        if (srq->com.from_state != RES_SRQ_HW) {
3388                err = -EBUSY;
3389                goto out;
3390        }
3391
3392        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3393out:
3394        put_res(dev, slave, srqn, RES_SRQ);
3395        return err;
3396}
3397
3398int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3399                        struct mlx4_vhcr *vhcr,
3400                        struct mlx4_cmd_mailbox *inbox,
3401                        struct mlx4_cmd_mailbox *outbox,
3402                        struct mlx4_cmd_info *cmd)
3403{
3404        int err;
3405        int qpn = vhcr->in_modifier & 0x7fffff;
3406        struct res_qp *qp;
3407
3408        err = get_res(dev, slave, qpn, RES_QP, &qp);
3409        if (err)
3410                return err;
3411        if (qp->com.from_state != RES_QP_HW) {
3412                err = -EBUSY;
3413                goto out;
3414        }
3415
3416        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3417out:
3418        put_res(dev, slave, qpn, RES_QP);
3419        return err;
3420}
3421
3422int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3423                              struct mlx4_vhcr *vhcr,
3424                              struct mlx4_cmd_mailbox *inbox,
3425                              struct mlx4_cmd_mailbox *outbox,
3426                              struct mlx4_cmd_info *cmd)
3427{
3428        struct mlx4_qp_context *context = inbox->buf + 8;
3429        adjust_proxy_tun_qkey(dev, vhcr, context);
3430        update_pkey_index(dev, slave, inbox);
3431        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3432}
3433
3434static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3435                                  struct mlx4_qp_context *qpc,
3436                                  struct mlx4_cmd_mailbox *inbox)
3437{
3438        enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3439        u8 pri_sched_queue;
3440        int port = mlx4_slave_convert_port(
3441                   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3442
3443        if (port < 0)
3444                return -EINVAL;
3445
3446        pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3447                          ((port & 1) << 6);
3448
3449        if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3450            mlx4_is_eth(dev, port + 1)) {
3451                qpc->pri_path.sched_queue = pri_sched_queue;
3452        }
3453
3454        if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3455                port = mlx4_slave_convert_port(
3456                                dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3457                                + 1) - 1;
3458                if (port < 0)
3459                        return -EINVAL;
3460                qpc->alt_path.sched_queue =
3461                        (qpc->alt_path.sched_queue & ~(1 << 6)) |
3462                        (port & 1) << 6;
3463        }
3464        return 0;
3465}
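
/*
 * Bit 6 of sched_queue selects the (0-based) physical port.  If a VF's
 * virtual port 1 maps to physical port 2, a sched_queue of 0x00 (virtual
 * port 1) is rewritten above to 0x40 (physical port 2).
 */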
3466
3467static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3468                                struct mlx4_qp_context *qpc,
3469                                struct mlx4_cmd_mailbox *inbox)
3470{
3471        u64 mac;
3472        int port;
3473        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3474        u8 sched = *(u8 *)(inbox->buf + 64);
3475        u8 smac_ix;
3476
3477        port = (sched >> 6 & 1) + 1;
3478        if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3479                smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3480                if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3481                        return -ENOENT;
3482        }
3483        return 0;
3484}
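
/*
 * For Ethernet (RoCE) QPs, the low 7 bits of pri_path.grh_mylmc hold the
 * QP's source-MAC index; the check above rejects the transition when that
 * index does not resolve to a MAC registered to this VF.
 */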
3485
3486int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3487                             struct mlx4_vhcr *vhcr,
3488                             struct mlx4_cmd_mailbox *inbox,
3489                             struct mlx4_cmd_mailbox *outbox,
3490                             struct mlx4_cmd_info *cmd)
3491{
3492        int err;
3493        struct mlx4_qp_context *qpc = inbox->buf + 8;
3494        int qpn = vhcr->in_modifier & 0x7fffff;
3495        struct res_qp *qp;
3496        u8 orig_sched_queue;
3497        __be32  orig_param3 = qpc->param3;
3498        u8 orig_vlan_control = qpc->pri_path.vlan_control;
3499        u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3500        u8 orig_pri_path_fl = qpc->pri_path.fl;
3501        u8 orig_vlan_index = qpc->pri_path.vlan_index;
3502        u8 orig_feup = qpc->pri_path.feup;
3503
3504        err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3505        if (err)
3506                return err;
3507        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3508        if (err)
3509                return err;
3510
3511        if (roce_verify_mac(dev, slave, qpc, inbox))
3512                return -EINVAL;
3513
3514        update_pkey_index(dev, slave, inbox);
3515        update_gid(dev, inbox, (u8)slave);
3516        adjust_proxy_tun_qkey(dev, vhcr, qpc);
3517        orig_sched_queue = qpc->pri_path.sched_queue;
3518        err = update_vport_qp_param(dev, inbox, slave, qpn);
3519        if (err)
3520                return err;
3521
3522        err = get_res(dev, slave, qpn, RES_QP, &qp);
3523        if (err)
3524                return err;
3525        if (qp->com.from_state != RES_QP_HW) {
3526                err = -EBUSY;
3527                goto out;
3528        }
3529
3530        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3531out:
3532        /* If there was no error, save the sched_queue value passed in by
3533         * the VF.  This is essentially the QoS value the VF provided and
3534         * will be useful if we later allow dynamic changes from VST back to VGT.
3535         */
3536        if (!err) {
3537                qp->sched_queue = orig_sched_queue;
3538                qp->param3      = orig_param3;
3539                qp->vlan_control = orig_vlan_control;
3540                qp->fvl_rx      =  orig_fvl_rx;
3541                qp->pri_path_fl = orig_pri_path_fl;
3542                qp->vlan_index  = orig_vlan_index;
3543                qp->feup        = orig_feup;
3544        }
3545        put_res(dev, slave, qpn, RES_QP);
3546        return err;
3547}
3548
3549int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3550                            struct mlx4_vhcr *vhcr,
3551                            struct mlx4_cmd_mailbox *inbox,
3552                            struct mlx4_cmd_mailbox *outbox,
3553                            struct mlx4_cmd_info *cmd)
3554{
3555        int err;
3556        struct mlx4_qp_context *context = inbox->buf + 8;
3557
3558        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3559        if (err)
3560                return err;
3561        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3562        if (err)
3563                return err;
3564
3565        update_pkey_index(dev, slave, inbox);
3566        update_gid(dev, inbox, (u8)slave);
3567        adjust_proxy_tun_qkey(dev, vhcr, context);
3568        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3569}
3570
3571int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3572                            struct mlx4_vhcr *vhcr,
3573                            struct mlx4_cmd_mailbox *inbox,
3574                            struct mlx4_cmd_mailbox *outbox,
3575                            struct mlx4_cmd_info *cmd)
3576{
3577        int err;
3578        struct mlx4_qp_context *context = inbox->buf + 8;
3579
3580        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3581        if (err)
3582                return err;
3583        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3584        if (err)
3585                return err;
3586
3587        update_pkey_index(dev, slave, inbox);
3588        update_gid(dev, inbox, (u8)slave);
3589        adjust_proxy_tun_qkey(dev, vhcr, context);
3590        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3591}
3592
3593
3594int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3595                              struct mlx4_vhcr *vhcr,
3596                              struct mlx4_cmd_mailbox *inbox,
3597                              struct mlx4_cmd_mailbox *outbox,
3598                              struct mlx4_cmd_info *cmd)
3599{
3600        struct mlx4_qp_context *context = inbox->buf + 8;
3601        int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3602        if (err)
3603                return err;
3604        adjust_proxy_tun_qkey(dev, vhcr, context);
3605        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3606}
3607
3608int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3609                            struct mlx4_vhcr *vhcr,
3610                            struct mlx4_cmd_mailbox *inbox,
3611                            struct mlx4_cmd_mailbox *outbox,
3612                            struct mlx4_cmd_info *cmd)
3613{
3614        int err;
3615        struct mlx4_qp_context *context = inbox->buf + 8;
3616
3617        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3618        if (err)
3619                return err;
3620        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3621        if (err)
3622                return err;
3623
3624        adjust_proxy_tun_qkey(dev, vhcr, context);
3625        update_gid(dev, inbox, (u8)slave);
3626        update_pkey_index(dev, slave, inbox);
3627        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3628}
3629
3630int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3631                            struct mlx4_vhcr *vhcr,
3632                            struct mlx4_cmd_mailbox *inbox,
3633                            struct mlx4_cmd_mailbox *outbox,
3634                            struct mlx4_cmd_info *cmd)
3635{
3636        int err;
3637        struct mlx4_qp_context *context = inbox->buf + 8;
3638
3639        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3640        if (err)
3641                return err;
3642        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3643        if (err)
3644                return err;
3645
3646        adjust_proxy_tun_qkey(dev, vhcr, context);
3647        update_gid(dev, inbox, (u8)slave);
3648        update_pkey_index(dev, slave, inbox);
3649        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3650}
3651
3652int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3653                         struct mlx4_vhcr *vhcr,
3654                         struct mlx4_cmd_mailbox *inbox,
3655                         struct mlx4_cmd_mailbox *outbox,
3656                         struct mlx4_cmd_info *cmd)
3657{
3658        int err;
3659        int qpn = vhcr->in_modifier & 0x7fffff;
3660        struct res_qp *qp;
3661
3662        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3663        if (err)
3664                return err;
3665        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3666        if (err)
3667                goto ex_abort;
3668
3669        atomic_dec(&qp->mtt->ref_count);
3670        atomic_dec(&qp->rcq->ref_count);
3671        atomic_dec(&qp->scq->ref_count);
3672        if (qp->srq)
3673                atomic_dec(&qp->srq->ref_count);
3674        res_end_move(dev, slave, RES_QP, qpn);
3675        return 0;
3676
3677ex_abort:
3678        res_abort_move(dev, slave, RES_QP, qpn);
3679
3680        return err;
3681}
3682
3683static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3684                                struct res_qp *rqp, u8 *gid)
3685{
3686        struct res_gid *res;
3687
3688        list_for_each_entry(res, &rqp->mcg_list, list) {
3689                if (!memcmp(res->gid, gid, 16))
3690                        return res;
3691        }
3692        return NULL;
3693}
3694
3695static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3696                       u8 *gid, enum mlx4_protocol prot,
3697                       enum mlx4_steer_type steer, u64 reg_id)
3698{
3699        struct res_gid *res;
3700        int err;
3701
3702        res = kzalloc(sizeof *res, GFP_KERNEL);
3703        if (!res)
3704                return -ENOMEM;
3705
3706        spin_lock_irq(&rqp->mcg_spl);
3707        if (find_gid(dev, slave, rqp, gid)) {
3708                kfree(res);
3709                err = -EEXIST;
3710        } else {
3711                memcpy(res->gid, gid, 16);
3712                res->prot = prot;
3713                res->steer = steer;
3714                res->reg_id = reg_id;
3715                list_add_tail(&res->list, &rqp->mcg_list);
3716                err = 0;
3717        }
3718        spin_unlock_irq(&rqp->mcg_spl);
3719
3720        return err;
3721}
3722
3723static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3724                       u8 *gid, enum mlx4_protocol prot,
3725                       enum mlx4_steer_type steer, u64 *reg_id)
3726{
3727        struct res_gid *res;
3728        int err;
3729
3730        spin_lock_irq(&rqp->mcg_spl);
3731        res = find_gid(dev, slave, rqp, gid);
3732        if (!res || res->prot != prot || res->steer != steer)
3733                err = -EINVAL;
3734        else {
3735                *reg_id = res->reg_id;
3736                list_del(&res->list);
3737                kfree(res);
3738                err = 0;
3739        }
3740        spin_unlock_irq(&rqp->mcg_spl);
3741
3742        return err;
3743}
3744
3745static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3746                     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3747                     enum mlx4_steer_type type, u64 *reg_id)
3748{
3749        switch (dev->caps.steering_mode) {
3750        case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3751                int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3752                if (port < 0)
3753                        return port;
3754                return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3755                                                block_loopback, prot,
3756                                                reg_id);
3757        }
3758        case MLX4_STEERING_MODE_B0:
3759                if (prot == MLX4_PROT_ETH) {
3760                        int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3761                        if (port < 0)
3762                                return port;
3763                        gid[5] = port;
3764                }
3765                return mlx4_qp_attach_common(dev, qp, gid,
3766                                            block_loopback, prot, type);
3767        default:
3768                return -EINVAL;
3769        }
3770}
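
/*
 * qp_attach() dispatches on the steering mode: device-managed steering
 * installs a flow rule and returns a reg_id for the matching detach, while
 * B0 steering rewrites gid[5] (the port byte for Ethernet) and takes the
 * common QP attach path, where detach is keyed by the GID itself and
 * reg_id remains 0.
 */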
3771
3772static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3773                     u8 gid[16], enum mlx4_protocol prot,
3774                     enum mlx4_steer_type type, u64 reg_id)
3775{
3776        switch (dev->caps.steering_mode) {
3777        case MLX4_STEERING_MODE_DEVICE_MANAGED:
3778                return mlx4_flow_detach(dev, reg_id);
3779        case MLX4_STEERING_MODE_B0:
3780                return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3781        default:
3782                return -EINVAL;
3783        }
3784}
3785
3786static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3787                            u8 *gid, enum mlx4_protocol prot)
3788{
3789        int real_port;
3790
3791        if (prot != MLX4_PROT_ETH)
3792                return 0;
3793
3794        if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3795            dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3796                real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3797                if (real_port < 0)
3798                        return -EINVAL;
3799                gid[5] = real_port;
3800        }
3801
3802        return 0;
3803}
3804
3805int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3806                               struct mlx4_vhcr *vhcr,
3807                               struct mlx4_cmd_mailbox *inbox,
3808                               struct mlx4_cmd_mailbox *outbox,
3809                               struct mlx4_cmd_info *cmd)
3810{
3811        struct mlx4_qp qp; /* dummy for calling attach/detach */
3812        u8 *gid = inbox->buf;
3813        enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3814        int err;
3815        int qpn;
3816        struct res_qp *rqp;
3817        u64 reg_id = 0;
3818        int attach = vhcr->op_modifier;
3819        int block_loopback = vhcr->in_modifier >> 31;
3820        u8 steer_type_mask = 2;
3821        enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3822
3823        qpn = vhcr->in_modifier & 0xffffff;
3824        err = get_res(dev, slave, qpn, RES_QP, &rqp);
3825        if (err)
3826                return err;
3827
3828        qp.qpn = qpn;
3829        if (attach) {
3830                err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3831                                type, &reg_id);
3832                if (err) {
3833                        pr_err("Failed to attach rule to QP 0x%x\n", qpn);
3834                        goto ex_put;
3835                }
3836                err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3837                if (err)
3838                        goto ex_detach;
3839        } else {
3840                err = mlx4_adjust_port(dev, slave, gid, prot);
3841                if (err)
3842                        goto ex_put;
3843
3844                err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3845                if (err)
3846                        goto ex_put;
3847
3848                err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3849                if (err)
3850                        pr_err("Failed to detach rule from QP 0x%x, reg_id = 0x%llx\n",
3851                               qpn, reg_id);
3852        }
3853        put_res(dev, slave, qpn, RES_QP);
3854        return err;
3855
3856ex_detach:
3857        qp_detach(dev, &qp, gid, prot, type, reg_id);
3858ex_put:
3859        put_res(dev, slave, qpn, RES_QP);
3860        return err;
3861}
3862
3863/*
3864 * MAC validation for Flow Steering rules.
3865 * A VF may attach rules only with a MAC address that is assigned to it.
3866 */
3867static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3868                                   struct list_head *rlist)
3869{
3870        struct mac_res *res, *tmp;
3871        __be64 be_mac;
3872
3873        /* make sure it isn't a multicast or broadcast MAC */
3874        if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3875            !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3876                list_for_each_entry_safe(res, tmp, rlist, list) {
3877                        be_mac = cpu_to_be64(res->mac << 16);
3878                        if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3879                                return 0;
3880                }
3881                pr_err("MAC %pM doesn't belong to VF %d; steering rule rejected\n",
3882                       eth_header->eth.dst_mac, slave);
3883                return -EINVAL;
3884        }
3885        return 0;
3886}
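
/*
 * A registered MAC is kept as a host-order u64; cpu_to_be64(mac << 16)
 * lays its six bytes out first in memory so it can be compared byte-wise
 * with the rule's dst_mac.  For example, 0x0002c9abcdef becomes the byte
 * sequence 00:02:c9:ab:cd:ef followed by two zero bytes.
 */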
3887
3888/*
3889 * If the Ethernet header is missing, prepend an Ethernet header carrying
3890 * a MAC address assigned to the VF.
3891 */
3892static int add_eth_header(struct mlx4_dev *dev, int slave,
3893                          struct mlx4_cmd_mailbox *inbox,
3894                          struct list_head *rlist, int header_id)
3895{
3896        struct mac_res *res, *tmp;
3897        u8 port;
3898        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3899        struct mlx4_net_trans_rule_hw_eth *eth_header;
3900        struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3901        struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3902        __be64 be_mac = 0;
3903        __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3904
3905        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3906        port = ctrl->port;
3907        eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3908
3909        /* Make room in the inbox for the Ethernet header */
3910        switch (header_id) {
3911        case MLX4_NET_TRANS_RULE_ID_IPV4:
3912                ip_header =
3913                        (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3914                memmove(ip_header, eth_header,
3915                        sizeof(*ip_header) + sizeof(*l4_header));
3916                break;
3917        case MLX4_NET_TRANS_RULE_ID_TCP:
3918        case MLX4_NET_TRANS_RULE_ID_UDP:
3919                l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3920                            (eth_header + 1);
3921                memmove(l4_header, eth_header, sizeof(*l4_header));
3922                break;
3923        default:
3924                return -EINVAL;
3925        }
3926        list_for_each_entry_safe(res, tmp, rlist, list) {
3927                if (port == res->port) {
3928                        be_mac = cpu_to_be64(res->mac << 16);
3929                        break;
3930                }
3931        }
3932        if (!be_mac) {
3933                pr_err("Failed to add eth header to FS rule; can't find a matching MAC for port %d\n",
3934                       port);
3935                return -EINVAL;
3936        }
3937
3938        memset(eth_header, 0, sizeof(*eth_header));
3939        eth_header->size = sizeof(*eth_header) >> 2;
3940        eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3941        memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3942        memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3943
3944        return 0;
3945
3946}
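
/*
 * Mailbox layout handled by add_eth_header() for, e.g., an IPv4 rule:
 *
 *   before:  [ctrl][ipv4][tcp/udp]
 *   after:   [ctrl][eth ][ipv4][tcp/udp]
 *
 * The L3/L4 specs are shifted up by sizeof(eth header) and the caller
 * enlarges the command's in_modifier by sizeof(eth header) >> 2 dwords.
 */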
3947
3948#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
3949int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
3950                           struct mlx4_vhcr *vhcr,
3951                           struct mlx4_cmd_mailbox *inbox,
3952                           struct mlx4_cmd_mailbox *outbox,
3953                           struct mlx4_cmd_info *cmd_info)
3954{
3955        int err;
3956        u32 qpn = vhcr->in_modifier & 0xffffff;
3957        struct res_qp *rqp;
3958        u64 mac;
3959        unsigned port;
3960        u64 pri_addr_path_mask;
3961        struct mlx4_update_qp_context *cmd;
3962        int smac_index;
3963
3964        cmd = (struct mlx4_update_qp_context *)inbox->buf;
3965
3966        pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
3967        if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
3968            (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
3969                return -EPERM;
3970
3971        /* Only the QP's SMAC index may be changed here */
3972        err = get_res(dev, slave, qpn, RES_QP, &rqp);
3973        if (err) {
3974                mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
3975                return err;
3976        }
3977
3978        port = (rqp->sched_queue >> 6 & 1) + 1;
3979        smac_index = cmd->qp_context.pri_path.grh_mylmc;
3980        err = mac_find_smac_ix_in_slave(dev, slave, port,
3981                                        smac_index, &mac);
3982        if (err) {
3983                mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
3984                         qpn, smac_index);
3985                goto err_mac;
3986        }
3987
3988        err = mlx4_cmd(dev, inbox->dma,
3989                       vhcr->in_modifier, 0,
3990                       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
3991                       MLX4_CMD_NATIVE);
3992        if (err) {
3993                mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
3994                goto err_mac;
3995        }
3996
3997err_mac:
3998        put_res(dev, slave, qpn, RES_QP);
3999        return err;
4000}
4001
4002int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4003                                         struct mlx4_vhcr *vhcr,
4004                                         struct mlx4_cmd_mailbox *inbox,
4005                                         struct mlx4_cmd_mailbox *outbox,
4006                                         struct mlx4_cmd_info *cmd)
4007{
4008
4009        struct mlx4_priv *priv = mlx4_priv(dev);
4010        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4011        struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4012        int err;
4013        int qpn;
4014        struct res_qp *rqp;
4015        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4016        struct _rule_hw  *rule_header;
4017        int header_id;
4018
4019        if (dev->caps.steering_mode !=
4020            MLX4_STEERING_MODE_DEVICE_MANAGED)
4021                return -EOPNOTSUPP;
4022
4023        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4024        err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4025        if (err <= 0) return -EINVAL;   /* check before the u8 field truncates it */
4026        ctrl->port = err;
4027        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4028        err = get_res(dev, slave, qpn, RES_QP, &rqp);
4029        if (err) {
4030                pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4031                return err;
4032        }
4033        rule_header = (struct _rule_hw *)(ctrl + 1);
4034        header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4035
4036        switch (header_id) {
4037        case MLX4_NET_TRANS_RULE_ID_ETH:
4038                if (validate_eth_header_mac(slave, rule_header, rlist)) {
4039                        err = -EINVAL;
4040                        goto err_put;
4041                }
4042                break;
4043        case MLX4_NET_TRANS_RULE_ID_IB:
4044                break;
4045        case MLX4_NET_TRANS_RULE_ID_IPV4:
4046        case MLX4_NET_TRANS_RULE_ID_TCP:
4047        case MLX4_NET_TRANS_RULE_ID_UDP:
4048                pr_warn("Can't attach FS rule without L2 headers; adding L2 header\n");
4049                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4050                        err = -EINVAL;
4051                        goto err_put;
4052                }
4053                vhcr->in_modifier +=
4054                        sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4055                break;
4056        default:
4057                pr_err("Corrupted mailbox\n");
4058                err = -EINVAL;
4059                goto err_put;
4060        }
4061
4062        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4063                           vhcr->in_modifier, 0,
4064                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4065                           MLX4_CMD_NATIVE);
4066        if (err)
4067                goto err_put;
4068
4069        err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4070        if (err) {
4071                mlx4_err(dev, "Failed to add flow steering resources\n");
4072                /* detach the rule */
4073                mlx4_cmd(dev, vhcr->out_param, 0, 0,
4074                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4075                         MLX4_CMD_NATIVE);
4076                goto err_put;
4077        }
4078        atomic_inc(&rqp->ref_count);
4079err_put:
4080        put_res(dev, slave, qpn, RES_QP);
4081        return err;
4082}
4083
4084int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4085                                         struct mlx4_vhcr *vhcr,
4086                                         struct mlx4_cmd_mailbox *inbox,
4087                                         struct mlx4_cmd_mailbox *outbox,
4088                                         struct mlx4_cmd_info *cmd)
4089{
4090        int err;
4091        struct res_qp *rqp;
4092        struct res_fs_rule *rrule;
4093
4094        if (dev->caps.steering_mode !=
4095            MLX4_STEERING_MODE_DEVICE_MANAGED)
4096                return -EOPNOTSUPP;
4097
4098        err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4099        if (err)
4100                return err;
4101        /* Release the rule from busy state before removal */
4102        put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4103        err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4104        if (err)
4105                return err;
4106
4107        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4108        if (err) {
4109                mlx4_err(dev, "Failed to remove flow steering resources\n");
4110                goto out;
4111        }
4112
4113        err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4114                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4115                       MLX4_CMD_NATIVE);
4116        if (!err)
4117                atomic_dec(&rqp->ref_count);
4118out:
4119        put_res(dev, slave, rrule->qpn, RES_QP);
4120        return err;
4121}
4122
4123enum {
4124        BUSY_MAX_RETRIES = 10
4125};
4126
4127int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4128                               struct mlx4_vhcr *vhcr,
4129                               struct mlx4_cmd_mailbox *inbox,
4130                               struct mlx4_cmd_mailbox *outbox,
4131                               struct mlx4_cmd_info *cmd)
4132{
4133        int err;
4134        int index = vhcr->in_modifier & 0xffff;
4135
4136        err = get_res(dev, slave, index, RES_COUNTER, NULL);
4137        if (err)
4138                return err;
4139
4140        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4141        put_res(dev, slave, index, RES_COUNTER);
4142        return err;
4143}
4144
4145static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4146{
4147        struct res_gid *rgid;
4148        struct res_gid *tmp;
4149        struct mlx4_qp qp; /* dummy for calling attach/detach */
4150
4151        list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4152                switch (dev->caps.steering_mode) {
4153                case MLX4_STEERING_MODE_DEVICE_MANAGED:
4154                        mlx4_flow_detach(dev, rgid->reg_id);
4155                        break;
4156                case MLX4_STEERING_MODE_B0:
4157                        qp.qpn = rqp->local_qpn;
4158                        (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4159                                                     rgid->prot, rgid->steer);
4160                        break;
4161                }
4162                list_del(&rgid->list);
4163                kfree(rgid);
4164        }
4165}
4166
4167static int _move_all_busy(struct mlx4_dev *dev, int slave,
4168                          enum mlx4_resource type, int print)
4169{
4170        struct mlx4_priv *priv = mlx4_priv(dev);
4171        struct mlx4_resource_tracker *tracker =
4172                &priv->mfunc.master.res_tracker;
4173        struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4174        struct res_common *r;
4175        struct res_common *tmp;
4176        int busy;
4177
4178        busy = 0;
4179        spin_lock_irq(mlx4_tlock(dev));
4180        list_for_each_entry_safe(r, tmp, rlist, list) {
4181                if (r->owner == slave) {
4182                        if (!r->removing) {
4183                                if (r->state == RES_ANY_BUSY) {
4184                                        if (print)
4185                                                mlx4_dbg(dev,
4186                                                         "%s id 0x%llx is busy\n",
4187                                                          resource_str(type),
4188                                                          r->res_id);
4189                                        ++busy;
4190                                } else {
4191                                        r->from_state = r->state;
4192                                        r->state = RES_ANY_BUSY;
4193                                        r->removing = 1;
4194                                }
4195                        }
4196                }
4197        }
4198        spin_unlock_irq(mlx4_tlock(dev));
4199
4200        return busy;
4201}
4202
4203static int move_all_busy(struct mlx4_dev *dev, int slave,
4204                         enum mlx4_resource type)
4205{
4206        unsigned long begin;
4207        int busy;
4208
4209        begin = jiffies;
4210        do {
4211                busy = _move_all_busy(dev, slave, type, 0);
4212                if (time_after(jiffies, begin + 5 * HZ))
4213                        break;
4214                if (busy)
4215                        cond_resched();
4216        } while (busy);
4217
4218        if (busy)
4219                busy = _move_all_busy(dev, slave, type, 1);
4220
4221        return busy;
4222}
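
/*
 * Slave cleanup path: move_all_busy() retries for up to five seconds to
 * mark every resource owned by the slave busy/removing, and the
 * rem_slave_*() helpers below then walk each resource list, unwinding the
 * per-resource state machine one step at a time (e.g. RES_QP_HW ->
 * RES_QP_MAPPED -> RES_QP_RESERVED -> freed) and issuing the matching
 * 2RST/HW2SW commands along the way.
 */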
4223static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4224{
4225        struct mlx4_priv *priv = mlx4_priv(dev);
4226        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4227        struct list_head *qp_list =
4228                &tracker->slave_list[slave].res_list[RES_QP];
4229        struct res_qp *qp;
4230        struct res_qp *tmp;
4231        int state;
4232        u64 in_param;
4233        int qpn;
4234        int err;
4235
4236        err = move_all_busy(dev, slave, RES_QP);
4237        if (err)
4238                mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4239                          slave);
4240
4241        spin_lock_irq(mlx4_tlock(dev));
4242        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4243                spin_unlock_irq(mlx4_tlock(dev));
4244                if (qp->com.owner == slave) {
4245                        qpn = qp->com.res_id;
4246                        detach_qp(dev, slave, qp);
4247                        state = qp->com.from_state;
4248                        while (state != 0) {
4249                                switch (state) {
4250                                case RES_QP_RESERVED:
4251                                        spin_lock_irq(mlx4_tlock(dev));
4252                                        rb_erase(&qp->com.node,
4253                                                 &tracker->res_tree[RES_QP]);
4254                                        list_del(&qp->com.list);
4255                                        spin_unlock_irq(mlx4_tlock(dev));
4256                                        if (!valid_reserved(dev, slave, qpn)) {
4257                                                __mlx4_qp_release_range(dev, qpn, 1);
4258                                                mlx4_release_resource(dev, slave,
4259                                                                      RES_QP, 1, 0);
4260                                        }
4261                                        kfree(qp);
4262                                        state = 0;
4263                                        break;
4264                                case RES_QP_MAPPED:
4265                                        if (!valid_reserved(dev, slave, qpn))
4266                                                __mlx4_qp_free_icm(dev, qpn);
4267                                        state = RES_QP_RESERVED;
4268                                        break;
4269                                case RES_QP_HW:
4270                                        in_param = slave;
4271                                        err = mlx4_cmd(dev, in_param,
4272                                                       qp->local_qpn, 2,
4273                                                       MLX4_CMD_2RST_QP,
4274                                                       MLX4_CMD_TIME_CLASS_A,
4275                                                       MLX4_CMD_NATIVE);
4276                                        if (err)
4277                                                mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4278                                                         slave, qp->local_qpn);
4279                                        atomic_dec(&qp->rcq->ref_count);
4280                                        atomic_dec(&qp->scq->ref_count);
4281                                        atomic_dec(&qp->mtt->ref_count);
4282                                        if (qp->srq)
4283                                                atomic_dec(&qp->srq->ref_count);
4284                                        state = RES_QP_MAPPED;
4285                                        break;
4286                                default:
4287                                        state = 0;
4288                                }
4289                        }
4290                }
4291                spin_lock_irq(mlx4_tlock(dev));
4292        }
4293        spin_unlock_irq(mlx4_tlock(dev));
4294}
4295
4296static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4297{
4298        struct mlx4_priv *priv = mlx4_priv(dev);
4299        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4300        struct list_head *srq_list =
4301                &tracker->slave_list[slave].res_list[RES_SRQ];
4302        struct res_srq *srq;
4303        struct res_srq *tmp;
4304        int state;
4305        u64 in_param;
4306        LIST_HEAD(tlist);
4307        int srqn;
4308        int err;
4309
4310        err = move_all_busy(dev, slave, RES_SRQ);
4311        if (err)
4312                mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
4313                          slave);
4314
4315        spin_lock_irq(mlx4_tlock(dev));
4316        list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4317                spin_unlock_irq(mlx4_tlock(dev));
4318                if (srq->com.owner == slave) {
4319                        srqn = srq->com.res_id;
4320                        state = srq->com.from_state;
4321                        while (state != 0) {
4322                                switch (state) {
4323                                case RES_SRQ_ALLOCATED:
4324                                        __mlx4_srq_free_icm(dev, srqn);
4325                                        spin_lock_irq(mlx4_tlock(dev));
4326                                        rb_erase(&srq->com.node,
4327                                                 &tracker->res_tree[RES_SRQ]);
4328                                        list_del(&srq->com.list);
4329                                        spin_unlock_irq(mlx4_tlock(dev));
4330                                        mlx4_release_resource(dev, slave,
4331                                                              RES_SRQ, 1, 0);
4332                                        kfree(srq);
4333                                        state = 0;
4334                                        break;
4335
4336                                case RES_SRQ_HW:
4337                                        in_param = slave;
4338                                        err = mlx4_cmd(dev, in_param, srqn, 1,
4339                                                       MLX4_CMD_HW2SW_SRQ,
4340                                                       MLX4_CMD_TIME_CLASS_A,
4341                                                       MLX4_CMD_NATIVE);
4342                                        if (err)
4343                                                mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4344                                                         slave, srqn);
4345
4346                                        atomic_dec(&srq->mtt->ref_count);
4347                                        if (srq->cq)
4348                                                atomic_dec(&srq->cq->ref_count);
4349                                        state = RES_SRQ_ALLOCATED;
4350                                        break;
4351
4352                                default:
4353                                        state = 0;
4354                                }
4355                        }
4356                }
4357                spin_lock_irq(mlx4_tlock(dev));
4358        }
4359        spin_unlock_irq(mlx4_tlock(dev));
4360}
4361
4362static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4363{
4364        struct mlx4_priv *priv = mlx4_priv(dev);
4365        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4366        struct list_head *cq_list =
4367                &tracker->slave_list[slave].res_list[RES_CQ];
4368        struct res_cq *cq;
4369        struct res_cq *tmp;
4370        int state;
4371        u64 in_param;
4372        LIST_HEAD(tlist);
4373        int cqn;
4374        int err;
4375
4376        err = move_all_busy(dev, slave, RES_CQ);
4377        if (err)
4378                mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
4379                          slave);
4380
4381        spin_lock_irq(mlx4_tlock(dev));
4382        list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4383                spin_unlock_irq(mlx4_tlock(dev));
4384                if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4385                        cqn = cq->com.res_id;
4386                        state = cq->com.from_state;
4387                        while (state != 0) {
4388                                switch (state) {
4389                                case RES_CQ_ALLOCATED:
4390                                        __mlx4_cq_free_icm(dev, cqn);
4391                                        spin_lock_irq(mlx4_tlock(dev));
4392                                        rb_erase(&cq->com.node,
4393                                                 &tracker->res_tree[RES_CQ]);
4394                                        list_del(&cq->com.list);
4395                                        spin_unlock_irq(mlx4_tlock(dev));
4396                                        mlx4_release_resource(dev, slave,
4397                                                              RES_CQ, 1, 0);
4398                                        kfree(cq);
4399                                        state = 0;
4400                                        break;
4401
4402                                case RES_CQ_HW:
4403                                        in_param = slave;
4404                                        err = mlx4_cmd(dev, in_param, cqn, 1,
4405                                                       MLX4_CMD_HW2SW_CQ,
4406                                                       MLX4_CMD_TIME_CLASS_A,
4407                                                       MLX4_CMD_NATIVE);
4408                                        if (err)
4409                                                mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4410                                                         slave, cqn);
4411                                        atomic_dec(&cq->mtt->ref_count);
4412                                        state = RES_CQ_ALLOCATED;
4413                                        break;
4414
4415                                default:
4416                                        state = 0;
4417                                }
4418                        }
4419                }
4420                spin_lock_irq(mlx4_tlock(dev));
4421        }
4422        spin_unlock_irq(mlx4_tlock(dev));
4423}
4424
4425static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4426{
4427        struct mlx4_priv *priv = mlx4_priv(dev);
4428        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4429        struct list_head *mpt_list =
4430                &tracker->slave_list[slave].res_list[RES_MPT];
4431        struct res_mpt *mpt;
4432        struct res_mpt *tmp;
4433        int state;
4434        u64 in_param;
4435        LIST_HEAD(tlist);
4436        int mptn;
4437        int err;
4438
4439        err = move_all_busy(dev, slave, RES_MPT);
4440        if (err)
4441                mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
4442                          slave);
4443
4444        spin_lock_irq(mlx4_tlock(dev));
4445        list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4446                spin_unlock_irq(mlx4_tlock(dev));
4447                if (mpt->com.owner == slave) {
4448                        mptn = mpt->com.res_id;
4449                        state = mpt->com.from_state;
4450                        while (state != 0) {
4451                                switch (state) {
4452                                case RES_MPT_RESERVED:
4453                                        __mlx4_mpt_release(dev, mpt->key);
4454                                        spin_lock_irq(mlx4_tlock(dev));
4455                                        rb_erase(&mpt->com.node,
4456                                                 &tracker->res_tree[RES_MPT]);
4457                                        list_del(&mpt->com.list);
4458                                        spin_unlock_irq(mlx4_tlock(dev));
4459                                        mlx4_release_resource(dev, slave,
4460                                                              RES_MPT, 1, 0);
4461                                        kfree(mpt);
4462                                        state = 0;
4463                                        break;
4464
4465                                case RES_MPT_MAPPED:
4466                                        __mlx4_mpt_free_icm(dev, mpt->key);
4467                                        state = RES_MPT_RESERVED;
4468                                        break;
4469
4470                                case RES_MPT_HW:
4471                                        in_param = slave;
4472                                        err = mlx4_cmd(dev, in_param, mptn, 0,
4473                                                       MLX4_CMD_HW2SW_MPT,
4474                                                       MLX4_CMD_TIME_CLASS_A,
4475                                                       MLX4_CMD_NATIVE);
4476                                        if (err)
4477                                                mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4478                                                         slave, mptn);
4479                                        if (mpt->mtt)
4480                                                atomic_dec(&mpt->mtt->ref_count);
4481                                        state = RES_MPT_MAPPED;
4482                                        break;
4483                                default:
4484                                        state = 0;
4485                                }
4486                        }
4487                }
4488                spin_lock_irq(mlx4_tlock(dev));
4489        }
4490        spin_unlock_irq(mlx4_tlock(dev));
4491}
4492
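    /*
     * rem_slave_mtts - release all MTT ranges still owned by @slave.  MTTs
     * have a single tracked state; each range covers 1 << order entries,
     * which is also the amount returned to the slave's quota via
     * mlx4_release_resource().
     */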
4493static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4494{
4495        struct mlx4_priv *priv = mlx4_priv(dev);
4496        struct mlx4_resource_tracker *tracker =
4497                &priv->mfunc.master.res_tracker;
4498        struct list_head *mtt_list =
4499                &tracker->slave_list[slave].res_list[RES_MTT];
4500        struct res_mtt *mtt;
4501        struct res_mtt *tmp;
4502        int state;
4504        int base;
4505        int err;
4506
4507        err = move_all_busy(dev, slave, RES_MTT);
4508        if (err)
4509                mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4510                          slave);
4511
4512        spin_lock_irq(mlx4_tlock(dev));
4513        list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4514                spin_unlock_irq(mlx4_tlock(dev));
4515                if (mtt->com.owner == slave) {
4516                        base = mtt->com.res_id;
4517                        state = mtt->com.from_state;
4518                        while (state != 0) {
4519                                switch (state) {
4520                                case RES_MTT_ALLOCATED:
4521                                        __mlx4_free_mtt_range(dev, base,
4522                                                              mtt->order);
4523                                        spin_lock_irq(mlx4_tlock(dev));
4524                                        rb_erase(&mtt->com.node,
4525                                                 &tracker->res_tree[RES_MTT]);
4526                                        list_del(&mtt->com.list);
4527                                        spin_unlock_irq(mlx4_tlock(dev));
4528                                        mlx4_release_resource(dev, slave, RES_MTT,
4529                                                              1 << mtt->order, 0);
4530                                        kfree(mtt);
4531                                        state = 0;
4532                                        break;
4533
4534                                default:
4535                                        state = 0;
4536                                }
4537                        }
4538                }
4539                spin_lock_irq(mlx4_tlock(dev));
4540        }
4541        spin_unlock_irq(mlx4_tlock(dev));
4542}
4543
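    /*
     * rem_slave_fs_rule - detach any flow steering rules the slave left
     * behind.  The tracked resource id doubles as the firmware rule handle,
     * so it is passed directly to MLX4_QP_FLOW_STEERING_DETACH.
     */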
4544static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4545{
4546        struct mlx4_priv *priv = mlx4_priv(dev);
4547        struct mlx4_resource_tracker *tracker =
4548                &priv->mfunc.master.res_tracker;
4549        struct list_head *fs_rule_list =
4550                &tracker->slave_list[slave].res_list[RES_FS_RULE];
4551        struct res_fs_rule *fs_rule;
4552        struct res_fs_rule *tmp;
4553        int state;
4554        u64 base;
4555        int err;
4556
4557        err = move_all_busy(dev, slave, RES_FS_RULE);
4558        if (err)
4559                mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules - too busy for slave %d\n",
4560                          slave);
4561
4562        spin_lock_irq(mlx4_tlock(dev));
4563        list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4564                spin_unlock_irq(mlx4_tlock(dev));
4565                if (fs_rule->com.owner == slave) {
4566                        base = fs_rule->com.res_id;
4567                        state = fs_rule->com.from_state;
4568                        while (state != 0) {
4569                                switch (state) {
4570                                case RES_FS_RULE_ALLOCATED:
4571                                        /* detach rule */
4572                                        err = mlx4_cmd(dev, base, 0, 0,
4573                                                       MLX4_QP_FLOW_STEERING_DETACH,
4574                                                       MLX4_CMD_TIME_CLASS_A,
4575                                                       MLX4_CMD_NATIVE);
                                            if (err)
                                                    mlx4_dbg(dev, "rem_slave_fs_rule: failed to detach fs rule for slave %d\n",
                                                             slave);
4576
4577                                        spin_lock_irq(mlx4_tlock(dev));
4578                                        rb_erase(&fs_rule->com.node,
4579                                                 &tracker->res_tree[RES_FS_RULE]);
4580                                        list_del(&fs_rule->com.list);
4581                                        spin_unlock_irq(mlx4_tlock(dev));
4582                                        kfree(fs_rule);
4583                                        state = 0;
4584                                        break;
4585
4586                                default:
4587                                        state = 0;
4588                                }
4589                        }
4590                }
4591                spin_lock_irq(mlx4_tlock(dev));
4592        }
4593        spin_unlock_irq(mlx4_tlock(dev));
4594}
4595
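    /*
     * rem_slave_eqs - return the slave's event queues to SW ownership
     * (HW2SW_EQ) and drop them from the tracker.  An EQ in RES_EQ_HW still
     * holds a reference on its MTT entry, which is released before the EQ
     * falls back to RES_EQ_RESERVED.
     */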
4596static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4597{
4598        struct mlx4_priv *priv = mlx4_priv(dev);
4599        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4600        struct list_head *eq_list =
4601                &tracker->slave_list[slave].res_list[RES_EQ];
4602        struct res_eq *eq;
4603        struct res_eq *tmp;
4604        int err;
4605        int state;
4607        int eqn;
4609
4610        err = move_all_busy(dev, slave, RES_EQ);
4611        if (err)
4612                mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4613                          slave);
4614
4615        spin_lock_irq(mlx4_tlock(dev));
4616        list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4617                spin_unlock_irq(mlx4_tlock(dev));
4618                if (eq->com.owner == slave) {
4619                        eqn = eq->com.res_id;
4620                        state = eq->com.from_state;
4621                        while (state != 0) {
4622                                switch (state) {
4623                                case RES_EQ_RESERVED:
4624                                        spin_lock_irq(mlx4_tlock(dev));
4625                                        rb_erase(&eq->com.node,
4626                                                 &tracker->res_tree[RES_EQ]);
4627                                        list_del(&eq->com.list);
4628                                        spin_unlock_irq(mlx4_tlock(dev));
4629                                        kfree(eq);
4630                                        state = 0;
4631                                        break;
4632
4633                                case RES_EQ_HW:
                                            /* the EQ context returned by
                                             * HW2SW_EQ is not consumed on this
                                             * path, so no output mailbox is
                                             * needed
                                             */
4639                                        err = mlx4_cmd(dev, slave,
4640                                                       eqn & 0xff, 0,
4641                                                       MLX4_CMD_HW2SW_EQ,
4642                                                       MLX4_CMD_TIME_CLASS_A,
4643                                                       MLX4_CMD_NATIVE);
4644                                        if (err)
4645                                                mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
4646                                                         slave, eqn);
4648                                        atomic_dec(&eq->mtt->ref_count);
4649                                        state = RES_EQ_RESERVED;
4650                                        break;
4651
4652                                default:
4653                                        state = 0;
4654                                }
4655                        }
4656                }
4657                spin_lock_irq(mlx4_tlock(dev));
4658        }
4659        spin_unlock_irq(mlx4_tlock(dev));
4660}
4661
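    /*
     * rem_slave_counters - free the counter indices still owned by @slave.
     * No firmware command is issued here, so unlike the other rem_slave_*
     * helpers the whole walk runs under the tracker lock.
     */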
4662static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4663{
4664        struct mlx4_priv *priv = mlx4_priv(dev);
4665        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4666        struct list_head *counter_list =
4667                &tracker->slave_list[slave].res_list[RES_COUNTER];
4668        struct res_counter *counter;
4669        struct res_counter *tmp;
4670        int err;
4671        int index;
4672
4673        err = move_all_busy(dev, slave, RES_COUNTER);
4674        if (err)
4675                mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4676                          slave);
4677
4678        spin_lock_irq(mlx4_tlock(dev));
4679        list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4680                if (counter->com.owner == slave) {
4681                        index = counter->com.res_id;
4682                        rb_erase(&counter->com.node,
4683                                 &tracker->res_tree[RES_COUNTER]);
4684                        list_del(&counter->com.list);
4685                        kfree(counter);
4686                        __mlx4_counter_free(dev, index);
4687                        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4688                }
4689        }
4690        spin_unlock_irq(mlx4_tlock(dev));
4691}
4692
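    /*
     * rem_slave_xrcdns - free any XRC domain numbers still owned by @slave;
     * like the counters, this is pure bookkeeping under the tracker lock.
     */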
4693static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4694{
4695        struct mlx4_priv *priv = mlx4_priv(dev);
4696        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4697        struct list_head *xrcdn_list =
4698                &tracker->slave_list[slave].res_list[RES_XRCD];
4699        struct res_xrcdn *xrcd;
4700        struct res_xrcdn *tmp;
4701        int err;
4702        int xrcdn;
4703
4704        err = move_all_busy(dev, slave, RES_XRCD);
4705        if (err)
4706                mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4707                          slave);
4708
4709        spin_lock_irq(mlx4_tlock(dev));
4710        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4711                if (xrcd->com.owner == slave) {
4712                        xrcdn = xrcd->com.res_id;
4713                        rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4714                        list_del(&xrcd->com.list);
4715                        kfree(xrcd);
4716                        __mlx4_xrcd_free(dev, xrcdn);
4717                }
4718        }
4719        spin_unlock_irq(mlx4_tlock(dev));
4720}
4721
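    /*
     * Tear down everything a slave owns, e.g. after an FLR or when the VF
     * driver goes away.  The ordering matters: flow steering rules are
     * detached before the QPs they reference are destroyed, and the
     * MTT-backed resources (QPs, SRQs, CQs, MRs, EQs), which may hold
     * references on MTT entries, are removed before rem_slave_mtts()
     * reclaims the MTTs themselves.
     */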
4722void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4723{
4724        struct mlx4_priv *priv = mlx4_priv(dev);
4725        mlx4_reset_roce_gids(dev, slave);
4726        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4727        rem_slave_vlans(dev, slave);
4728        rem_slave_macs(dev, slave);
4729        rem_slave_fs_rule(dev, slave);
4730        rem_slave_qps(dev, slave);
4731        rem_slave_srqs(dev, slave);
4732        rem_slave_cqs(dev, slave);
4733        rem_slave_mrs(dev, slave);
4734        rem_slave_eqs(dev, slave);
4735        rem_slave_mtts(dev, slave);
4736        rem_slave_counters(dev, slave);
4737        rem_slave_xrcdns(dev, slave);
4738        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4739}
4740
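    /*
     * Work handler for "immediate" VF VLAN/QoS changes, e.g. an
     * administrative VST VLAN update.  It runs on the master and walks the
     * slave's QPs that are in HW ownership on the affected port, issuing
     * UPDATE_QP on each: either restoring the QP's own saved VLAN
     * parameters (VGT) or forcing the newly assigned VLAN index, QoS bits
     * and vlan_control policy (VST).
     */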
4741void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4742{
4743        struct mlx4_vf_immed_vlan_work *work =
4744                container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4745        struct mlx4_cmd_mailbox *mailbox;
4746        struct mlx4_update_qp_context *upd_context;
4747        struct mlx4_dev *dev = &work->priv->dev;
4748        struct mlx4_resource_tracker *tracker =
4749                &work->priv->mfunc.master.res_tracker;
4750        struct list_head *qp_list =
4751                &tracker->slave_list[work->slave].res_list[RES_QP];
4752        struct res_qp *qp;
4753        struct res_qp *tmp;
4754        u64 qp_path_mask_vlan_ctrl =
4755                       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4756                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4757                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4758                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4759                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4760                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4761
4762        u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4763                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4764                       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4765                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4766                       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4767                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4768                       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4769
4770        int err;
4771        int port, errors = 0;
4772        u8 vlan_control;
4773
4774        if (mlx4_is_slave(dev)) {
4775                mlx4_warn(dev, "Trying to update-qp on a slave device (target slave %d)\n",
4776                          work->slave);
4777                goto out;
4778        }
4779
4780        mailbox = mlx4_alloc_cmd_mailbox(dev);
4781        if (IS_ERR(mailbox))
4782                goto out;
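            /*
             * Pick the vlan_control policy that UPDATE_QP will program:
             * link-disabled blocks all traffic, VLAN id 0 blocks tagged
             * frames only, and a real VLAN id (VST) blocks untagged and
             * priority-tagged RX plus tagged TX, so that effectively only
             * the assigned VLAN passes.
             */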
4783        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4784                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4785                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4786                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4787                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4788                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4789                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4790        else if (!work->vlan_id)
4791                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4792                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4793        else
4794                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4795                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4796                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4797
4798        upd_context = mailbox->buf;
4799        upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
4800
4801        spin_lock_irq(mlx4_tlock(dev));
4802        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4803                spin_unlock_irq(mlx4_tlock(dev));
4804                if (qp->com.owner == work->slave) {
4805                        if (qp->com.from_state != RES_QP_HW ||
4806                            !qp->sched_queue ||  /* no INIT2RTR trans yet */
4807                            mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4808                            qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4809                                spin_lock_irq(mlx4_tlock(dev));
4810                                continue;
4811                        }
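                            /* bit 6 of sched_queue encodes the (0-based) port */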
4812                        port = (qp->sched_queue >> 6 & 1) + 1;
4813                        if (port != work->port) {
4814                                spin_lock_irq(mlx4_tlock(dev));
4815                                continue;
4816                        }
4817                        if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4818                                upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4819                        else
4820                                upd_context->primary_addr_path_mask =
4821                                        cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4822                        if (work->vlan_id == MLX4_VGT) {
4823                                upd_context->qp_context.param3 = qp->param3;
4824                                upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4825                                upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4826                                upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4827                                upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4828                                upd_context->qp_context.pri_path.feup = qp->feup;
4829                                upd_context->qp_context.pri_path.sched_queue =
4830                                        qp->sched_queue;
4831                        } else {
4832                                upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4833                                upd_context->qp_context.pri_path.vlan_control = vlan_control;
4834                                upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4835                                upd_context->qp_context.pri_path.fvl_rx =
4836                                        qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4837                                upd_context->qp_context.pri_path.fl =
4838                                        qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4839                                upd_context->qp_context.pri_path.feup =
4840                                        qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
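                                    /* 0xC7 clears sched_queue bits 3..5, which
                                     * hold the egress priority; the new qos is
                                     * then placed there
                                     */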
4841                                upd_context->qp_context.pri_path.sched_queue =
4842                                        qp->sched_queue & 0xC7;
4843                                upd_context->qp_context.pri_path.sched_queue |=
4844                                        ((work->qos & 0x7) << 3);
4845                        }
4846
4847                        err = mlx4_cmd(dev, mailbox->dma,
4848                                       qp->local_qpn & 0xffffff,
4849                                       0, MLX4_CMD_UPDATE_QP,
4850                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4851                        if (err) {
4852                                mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4853                                          work->slave, port, qp->local_qpn, err);
4854                                errors++;
4855                        }
4856                }
4857                spin_lock_irq(mlx4_tlock(dev));
4858        }
4859        spin_unlock_irq(mlx4_tlock(dev));
4860        mlx4_free_cmd_mailbox(dev, mailbox);
4861
4862        if (errors)
4863                mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4864                         errors, work->slave, work->port);
4865
4866        /* unregister the previous vlan_id, if one was set, provided all
4867         * the QP updates succeeded
4868         */
4869        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4870            NO_INDX != work->orig_vlan_ix)
4871                __mlx4_unregister_vlan(&work->priv->dev, work->port,
4872                                       work->orig_vlan_id);
4873out:
4874        kfree(work);
4876}
4877