linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID          (1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT       2
#define MLX4_VF_COUNTERS_PER_PORT       1

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
        /* VF DMFS mbox with port flipped */
        void                    *mirr_mbox;
        /* > 0 --> apply mirror when getting into HA mode      */
        /* = 0 --> un-apply mirror when getting out of HA mode */
        u32                     mirr_mbox_size;
        struct list_head        mirr_list;
        u64                     mirr_rule_id;
};

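/* Tracked resources live in one red-black tree per resource type, keyed by
 * res_id, plus a per-slave ownership list.  The helpers below implement the
 * basic tree lookup and insertion.
 */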
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For Debug uses */
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return  "RES_MAC";
        case RES_VLAN: return  "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        };
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
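
/* Charge a slave's request for @count instances of @res_type against its
 * quota.  The slave's guaranteed share is consumed first; the remainder
 * comes from the shared free pool, which is never allowed to dip below the
 * amount still reserved for other functions' guarantees.
 */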
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}

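/* Return @count instances to the pools, crediting the reserved pool for
 * whatever portion had come out of the slave's guaranteed share.
 */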
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                    enum mlx4_resource res_type, int count,
                                    int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
        return;
}

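/* Default per-function quota: every function is guaranteed
 * num_instances / (2 * (num_vfs + 1)) of the resource and may allocate up to
 * half of the total on top of that guarantee.  The PF additionally seeds the
 * shared free pool and absorbs the reserved MTTs.
 */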
static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
        /* reduce the sink counter */
        return (dev->caps.max_counters - 1 -
                (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
                / MLX4_MAX_PORTS;
}

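/* Allocate the per-slave resource lists and the per-type allocator arrays,
 * then seed quotas and guarantees for every function (PF and VFs).
 */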
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;
        int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0 ; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
                                           sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
                                                sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->persist->num_vfs
                                                       + 1) *
                                                       sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->persist->
                                                        num_vfs + 1) *
                                                       sizeof(int), GFP_KERNEL);
                /* Reduce the sink counter */
                if (i == RES_COUNTER)
                        res_alloc->res_free = dev->caps.max_counters - 1;

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port for */
                                        /* both ports.                        */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->guaranteed[t] =
                                                MLX4_PF_COUNTERS_PER_PORT *
                                                MLX4_MAX_PORTS;
                                else if (t <= max_vfs_guarantee_counter)
                                        res_alloc->guaranteed[t] =
                                                MLX4_VF_COUNTERS_PER_PORT *
                                                MLX4_MAX_PORTS;
                                else
                                        res_alloc->guaranteed[t] = 0;
                                res_alloc->res_free -= res_alloc->guaranteed[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

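/* Remap the mgid_index fields in a QP context from the slave's own GID
 * numbering onto the device's GID table layout for the QP's port (primary
 * path and, when the optpar bit is set, the alternate path).
 */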
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
                          u8 slave, int port);

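/* Apply the PF-configured VST vlan, QinQ, link-state, QoS and spoof-check
 * policy to a QP context submitted by a VF, and attach a per-port counter.
 */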
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port, err = 0;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        err = handle_counter(dev, qpc, slave, port);
        if (err)
                goto out;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                                if (err)
                                        goto out;
                        }
                }

                /* preserve IF_COUNTER flag */
                qpc->pri_path.vlan_control &=
                        MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
                                /* vst QinQ should block untagged on TX,
                                 * but cvlan is in payload and phv is set so
                                 * hw sees it as untagged. Block tagged instead.
                                 */
                                qpc->pri_path.vlan_control |=
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                        } else { /* vst 802.1Q */
                                qpc->pri_path.vlan_control |=
                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                        }
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control |=
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
                if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
                        qpc->pri_path.fl |= MLX4_FL_SV;
                else
                        qpc->pri_path.fl |= MLX4_FL_CV;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
                qpc->qos_vport = vp_oper->state.qos_vport;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
out:
        return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

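/* Take temporary ownership of a tracked resource: verify it belongs to
 * @slave, mark it busy, and remember the previous state so that put_res()
 * can restore it.
 */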
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENONET;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{

        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                             u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
                                   int counter_index)
{
        struct res_common *r;
        struct res_counter *counter;
        int ret = 0;

        if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
                return ret;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, counter_index, RES_COUNTER);
        if (!r || r->owner != slave) {
                ret = -EINVAL;
        } else {
                counter = container_of(r, struct res_counter, com);
                if (!counter->port)
                        counter->port = port;
        }

        spin_unlock_irq(mlx4_tlock(dev));
        return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
                                     struct mlx4_qp_context *qpc, u8 slave,
                                     int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *tmp;
        struct res_counter *counter;
        u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(tmp,
                            &tracker->slave_list[slave].res_list[RES_COUNTER],
                            list) {
                counter = container_of(tmp, struct res_counter, com);
                if (port == counter->port) {
                        qpc->pri_path.counter_index  = counter->com.res_id;
                        spin_unlock_irq(mlx4_tlock(dev));
                        return 0;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));

        /* No existing counter, need to allocate a new counter */
        err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
                                port);
        if (err == -ENOENT) {
                err = 0;
        } else if (err && err != -ENOSPC) {
                mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
                         __func__, slave, err);
        } else {
                qpc->pri_path.counter_index = counter_idx;
                mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
                         __func__, slave, qpc->pri_path.counter_index);
                err = 0;
        }

        return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
                          u8 slave, int port)
{
        if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
                return handle_existing_counter(dev, slave, port,
                                               qpc->pri_path.counter_index);

        return handle_unexisting_counter(dev, qpc, slave, port);
}

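/* The alloc_*_tr() helpers build the per-type tracking entries that
 * add_res_range() links into the rb-tree and the owning slave's list.
 */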
static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;
        ret->port = port;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id, extra);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
                          struct mlx4_counter *data)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *tmp;
        struct res_counter *counter;
        int *counters_arr;
        int i = 0, err = 0;

        memset(data, 0, sizeof(*data));

        counters_arr = kmalloc_array(dev->caps.max_counters,
                                     sizeof(*counters_arr), GFP_KERNEL);
        if (!counters_arr)
                return -ENOMEM;

        spin_lock_irq(mlx4_tlock(dev));
        list_for_each_entry(tmp,
                            &tracker->slave_list[slave].res_list[RES_COUNTER],
                            list) {
                counter = container_of(tmp, struct res_counter, com);
                if (counter->port == port) {
                        counters_arr[i] = (int)tmp->res_id;
                        i++;
                }
        }
        spin_unlock_irq(mlx4_tlock(dev));
        counters_arr[i] = -1;

        i = 0;

        while (counters_arr[i] != -1) {
                err = mlx4_get_counter_stats(dev, counters_arr[i], data,
                                             0);
                if (err) {
                        memset(data, 0, sizeof(*data));
                        goto table_changed;
                }
                i++;
        }

table_changed:
        kfree(counters_arr);
        return 0;
}

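/* Register a contiguous range of resource ids [base, base + count) for
 * @slave: insert each id into the type's rb-tree and the slave's list,
 * rolling everything back if any id is already tracked.
 */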
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del_init(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

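/* Drop a previously registered id range, but only if every id in it is owned
 * by @slave and remove_ok() allows removal in its current state.
 */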
1413static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1414                         enum mlx4_resource type, int extra)
1415{
1416        u64 i;
1417        int err;
1418        struct mlx4_priv *priv = mlx4_priv(dev);
1419        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1420        struct res_common *r;
1421
1422        spin_lock_irq(mlx4_tlock(dev));
1423        for (i = base; i < base + count; ++i) {
1424                r = res_tracker_lookup(&tracker->res_tree[type], i);
1425                if (!r) {
1426                        err = -ENOENT;
1427                        goto out;
1428                }
1429                if (r->owner != slave) {
1430                        err = -EPERM;
1431                        goto out;
1432                }
1433                err = remove_ok(r, type, extra);
1434                if (err)
1435                        goto out;
1436        }
1437
1438        for (i = base; i < base + count; ++i) {
1439                r = res_tracker_lookup(&tracker->res_tree[type], i);
1440                rb_erase(&r->node, &tracker->res_tree[type]);
1441                list_del(&r->list);
1442                kfree(r);
1443        }
1444        err = 0;
1445
1446out:
1447        spin_unlock_irq(mlx4_tlock(dev));
1448
1449        return err;
1450}
1451
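    /*
     * The *_res_start_move_to() helpers below implement a two-phase state
     * change: under the tracker lock they verify that the requested
     * transition is legal for the current state, record from_state and
     * to_state, and park the resource in its BUSY state.  The caller later
     * commits the transition with res_end_move() or rolls it back with
     * res_abort_move(), depending on whether the firmware command worked.
     */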
1452static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1453                                enum res_qp_states state, struct res_qp **qp,
1454                                int alloc)
1455{
1456        struct mlx4_priv *priv = mlx4_priv(dev);
1457        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1458        struct res_qp *r;
1459        int err = 0;
1460
1461        spin_lock_irq(mlx4_tlock(dev));
1462        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1463        if (!r)
1464                err = -ENOENT;
1465        else if (r->com.owner != slave)
1466                err = -EPERM;
1467        else {
1468                switch (state) {
1469                case RES_QP_BUSY:
1470                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1471                                 __func__, r->com.res_id);
1472                        err = -EBUSY;
1473                        break;
1474
1475                case RES_QP_RESERVED:
1476                        if (r->com.state == RES_QP_MAPPED && !alloc)
1477                                break;
1478
1479                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1480                        err = -EINVAL;
1481                        break;
1482
1483                case RES_QP_MAPPED:
1484                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
1485                            r->com.state == RES_QP_HW)
1486                                break;
1487                        else {
1488                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1489                                          r->com.res_id);
1490                                err = -EINVAL;
1491                        }
1492
1493                        break;
1494
1495                case RES_QP_HW:
1496                        if (r->com.state != RES_QP_MAPPED)
1497                                err = -EINVAL;
1498                        break;
1499                default:
1500                        err = -EINVAL;
1501                }
1502
1503                if (!err) {
1504                        r->com.from_state = r->com.state;
1505                        r->com.to_state = state;
1506                        r->com.state = RES_QP_BUSY;
1507                        if (qp)
1508                                *qp = r;
1509                }
1510        }
1511
1512        spin_unlock_irq(mlx4_tlock(dev));
1513
1514        return err;
1515}
1516
1517static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1518                                enum res_mpt_states state, struct res_mpt **mpt)
1519{
1520        struct mlx4_priv *priv = mlx4_priv(dev);
1521        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1522        struct res_mpt *r;
1523        int err = 0;
1524
1525        spin_lock_irq(mlx4_tlock(dev));
1526        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1527        if (!r)
1528                err = -ENOENT;
1529        else if (r->com.owner != slave)
1530                err = -EPERM;
1531        else {
1532                switch (state) {
1533                case RES_MPT_BUSY:
1534                        err = -EINVAL;
1535                        break;
1536
1537                case RES_MPT_RESERVED:
1538                        if (r->com.state != RES_MPT_MAPPED)
1539                                err = -EINVAL;
1540                        break;
1541
1542                case RES_MPT_MAPPED:
1543                        if (r->com.state != RES_MPT_RESERVED &&
1544                            r->com.state != RES_MPT_HW)
1545                                err = -EINVAL;
1546                        break;
1547
1548                case RES_MPT_HW:
1549                        if (r->com.state != RES_MPT_MAPPED)
1550                                err = -EINVAL;
1551                        break;
1552                default:
1553                        err = -EINVAL;
1554                }
1555
1556                if (!err) {
1557                        r->com.from_state = r->com.state;
1558                        r->com.to_state = state;
1559                        r->com.state = RES_MPT_BUSY;
1560                        if (mpt)
1561                                *mpt = r;
1562                }
1563        }
1564
1565        spin_unlock_irq(mlx4_tlock(dev));
1566
1567        return err;
1568}
1569
1570static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1571                                enum res_eq_states state, struct res_eq **eq)
1572{
1573        struct mlx4_priv *priv = mlx4_priv(dev);
1574        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1575        struct res_eq *r;
1576        int err = 0;
1577
1578        spin_lock_irq(mlx4_tlock(dev));
1579        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1580        if (!r)
1581                err = -ENOENT;
1582        else if (r->com.owner != slave)
1583                err = -EPERM;
1584        else {
1585                switch (state) {
1586                case RES_EQ_BUSY:
1587                        err = -EINVAL;
1588                        break;
1589
1590                case RES_EQ_RESERVED:
1591                        if (r->com.state != RES_EQ_HW)
1592                                err = -EINVAL;
1593                        break;
1594
1595                case RES_EQ_HW:
1596                        if (r->com.state != RES_EQ_RESERVED)
1597                                err = -EINVAL;
1598                        break;
1599
1600                default:
1601                        err = -EINVAL;
1602                }
1603
1604                if (!err) {
1605                        r->com.from_state = r->com.state;
1606                        r->com.to_state = state;
1607                        r->com.state = RES_EQ_BUSY;
1608                }
1609        }
1610
1611        spin_unlock_irq(mlx4_tlock(dev));
1612
1613        if (!err && eq)
1614                *eq = r;
1615
1616        return err;
1617}
1618
1619static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1620                                enum res_cq_states state, struct res_cq **cq)
1621{
1622        struct mlx4_priv *priv = mlx4_priv(dev);
1623        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1624        struct res_cq *r;
1625        int err;
1626
1627        spin_lock_irq(mlx4_tlock(dev));
1628        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1629        if (!r) {
1630                err = -ENOENT;
1631        } else if (r->com.owner != slave) {
1632                err = -EPERM;
1633        } else if (state == RES_CQ_ALLOCATED) {
1634                if (r->com.state != RES_CQ_HW)
1635                        err = -EINVAL;
1636                else if (atomic_read(&r->ref_count))
1637                        err = -EBUSY;
1638                else
1639                        err = 0;
1640        } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1641                err = -EINVAL;
1642        } else {
1643                err = 0;
1644        }
1645
1646        if (!err) {
1647                r->com.from_state = r->com.state;
1648                r->com.to_state = state;
1649                r->com.state = RES_CQ_BUSY;
1650                if (cq)
1651                        *cq = r;
1652        }
1653
1654        spin_unlock_irq(mlx4_tlock(dev));
1655
1656        return err;
1657}
1658
1659static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1660                                 enum res_srq_states state, struct res_srq **srq)
1661{
1662        struct mlx4_priv *priv = mlx4_priv(dev);
1663        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1664        struct res_srq *r;
1665        int err = 0;
1666
1667        spin_lock_irq(mlx4_tlock(dev));
1668        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1669        if (!r) {
1670                err = -ENOENT;
1671        } else if (r->com.owner != slave) {
1672                err = -EPERM;
1673        } else if (state == RES_SRQ_ALLOCATED) {
1674                if (r->com.state != RES_SRQ_HW)
1675                        err = -EINVAL;
1676                else if (atomic_read(&r->ref_count))
1677                        err = -EBUSY;
1678        } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1679                err = -EINVAL;
1680        }
1681
1682        if (!err) {
1683                r->com.from_state = r->com.state;
1684                r->com.to_state = state;
1685                r->com.state = RES_SRQ_BUSY;
1686                if (srq)
1687                        *srq = r;
1688        }
1689
1690        spin_unlock_irq(mlx4_tlock(dev));
1691
1692        return err;
1693}
1694
1695static void res_abort_move(struct mlx4_dev *dev, int slave,
1696                           enum mlx4_resource type, int id)
1697{
1698        struct mlx4_priv *priv = mlx4_priv(dev);
1699        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1700        struct res_common *r;
1701
1702        spin_lock_irq(mlx4_tlock(dev));
1703        r = res_tracker_lookup(&tracker->res_tree[type], id);
1704        if (r && (r->owner == slave))
1705                r->state = r->from_state;
1706        spin_unlock_irq(mlx4_tlock(dev));
1707}
1708
1709static void res_end_move(struct mlx4_dev *dev, int slave,
1710                         enum mlx4_resource type, int id)
1711{
1712        struct mlx4_priv *priv = mlx4_priv(dev);
1713        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1714        struct res_common *r;
1715
1716        spin_lock_irq(mlx4_tlock(dev));
1717        r = res_tracker_lookup(&tracker->res_tree[type], id);
1718        if (r && (r->owner == slave))
1719                r->state = r->to_state;
1720        spin_unlock_irq(mlx4_tlock(dev));
1721}
1722
1723static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1724{
1725        return mlx4_is_qp_reserved(dev, qpn) &&
1726                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1727}
1728
1729static int fw_reserved(struct mlx4_dev *dev, int qpn)
1730{
1731        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1732}
1733
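    /*
     * For RES_OP_RESERVE the request is packed into in_param: the low
     * 24 bits of the low dword carry the QP count, the top byte carries
     * the slave's allocation flags (masked by dev->caps.alloc_res_qp_mask)
     * and the high dword carries the requested alignment.  RES_OP_MAP_ICM
     * passes the QP number to back with ICM in the low dword.
     */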
1734static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1735                        u64 in_param, u64 *out_param)
1736{
1737        int err;
1738        int count;
1739        int align;
1740        int base;
1741        int qpn;
1742        u8 flags;
1743
1744        switch (op) {
1745        case RES_OP_RESERVE:
1746                count = get_param_l(&in_param) & 0xffffff;
1747                /* Turn off all unsupported QP allocation flags that the
1748                 * slave tries to set.
1749                 */
1750                flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1751                align = get_param_h(&in_param);
1752                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1753                if (err)
1754                        return err;
1755
1756                err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1757                if (err) {
1758                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
1759                        return err;
1760                }
1761
1762                err = add_res_range(dev, slave, base, count, RES_QP, 0);
1763                if (err) {
1764                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
1765                        __mlx4_qp_release_range(dev, base, count);
1766                        return err;
1767                }
1768                set_param_l(out_param, base);
1769                break;
1770        case RES_OP_MAP_ICM:
1771                qpn = get_param_l(&in_param) & 0x7fffff;
1772                if (valid_reserved(dev, slave, qpn)) {
1773                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1774                        if (err)
1775                                return err;
1776                }
1777
1778                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1779                                           NULL, 1);
1780                if (err)
1781                        return err;
1782
1783                if (!fw_reserved(dev, qpn)) {
1784                        err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1785                        if (err) {
1786                                res_abort_move(dev, slave, RES_QP, qpn);
1787                                return err;
1788                        }
1789                }
1790
1791                res_end_move(dev, slave, RES_QP, qpn);
1792                break;
1793
1794        default:
1795                err = -EINVAL;
1796                break;
1797        }
1798        return err;
1799}
1800
1801static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1802                         u64 in_param, u64 *out_param)
1803{
1804        int err = -EINVAL;
1805        int base;
1806        int order;
1807
1808        if (op != RES_OP_RESERVE_AND_MAP)
1809                return err;
1810
1811        order = get_param_l(&in_param);
1812
1813        err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1814        if (err)
1815                return err;
1816
1817        base = __mlx4_alloc_mtt_range(dev, order);
1818        if (base == -1) {
1819                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1820                return -ENOMEM;
1821        }
1822
1823        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1824        if (err) {
1825                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1826                __mlx4_free_mtt_range(dev, base, order);
1827        } else {
1828                set_param_l(out_param, base);
1829        }
1830
1831        return err;
1832}
1833
1834static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1835                         u64 in_param, u64 *out_param)
1836{
1837        int err = -EINVAL;
1838        int index;
1839        int id;
1840        struct res_mpt *mpt;
1841
1842        switch (op) {
1843        case RES_OP_RESERVE:
1844                err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1845                if (err)
1846                        break;
1847
1848                index = __mlx4_mpt_reserve(dev);
1849                if (index == -1) {
1850                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1851                        break;
1852                }
1853                id = index & mpt_mask(dev);
1854
1855                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1856                if (err) {
1857                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1858                        __mlx4_mpt_release(dev, index);
1859                        break;
1860                }
1861                set_param_l(out_param, index);
1862                break;
1863        case RES_OP_MAP_ICM:
1864                index = get_param_l(&in_param);
1865                id = index & mpt_mask(dev);
1866                err = mr_res_start_move_to(dev, slave, id,
1867                                           RES_MPT_MAPPED, &mpt);
1868                if (err)
1869                        return err;
1870
1871                err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1872                if (err) {
1873                        res_abort_move(dev, slave, RES_MPT, id);
1874                        return err;
1875                }
1876
1877                res_end_move(dev, slave, RES_MPT, id);
1878                break;
1879        }
1880        return err;
1881}
1882
1883static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1884                        u64 in_param, u64 *out_param)
1885{
1886        int cqn;
1887        int err;
1888
1889        switch (op) {
1890        case RES_OP_RESERVE_AND_MAP:
1891                err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1892                if (err)
1893                        break;
1894
1895                err = __mlx4_cq_alloc_icm(dev, &cqn);
1896                if (err) {
1897                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1898                        break;
1899                }
1900
1901                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1902                if (err) {
1903                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1904                        __mlx4_cq_free_icm(dev, cqn);
1905                        break;
1906                }
1907
1908                set_param_l(out_param, cqn);
1909                break;
1910
1911        default:
1912                err = -EINVAL;
1913        }
1914
1915        return err;
1916}
1917
1918static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1919                         u64 in_param, u64 *out_param)
1920{
1921        int srqn;
1922        int err;
1923
1924        switch (op) {
1925        case RES_OP_RESERVE_AND_MAP:
1926                err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1927                if (err)
1928                        break;
1929
1930                err = __mlx4_srq_alloc_icm(dev, &srqn);
1931                if (err) {
1932                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1933                        break;
1934                }
1935
1936                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1937                if (err) {
1938                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1939                        __mlx4_srq_free_icm(dev, srqn);
1940                        break;
1941                }
1942
1943                set_param_l(out_param, srqn);
1944                break;
1945
1946        default:
1947                err = -EINVAL;
1948        }
1949
1950        return err;
1951}
1952
1953static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1954                                     u8 smac_index, u64 *mac)
1955{
1956        struct mlx4_priv *priv = mlx4_priv(dev);
1957        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1958        struct list_head *mac_list =
1959                &tracker->slave_list[slave].res_list[RES_MAC];
1960        struct mac_res *res, *tmp;
1961
1962        list_for_each_entry_safe(res, tmp, mac_list, list) {
1963                if (res->smac_index == smac_index && res->port == (u8) port) {
1964                        *mac = res->mac;
1965                        return 0;
1966                }
1967        }
1968        return -ENOENT;
1969}
1970
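    /*
     * MACs are tracked per slave on a reference-counted list rather than
     * in the resource trees: registering a MAC that is already listed for
     * the same port only bumps its ref_count, while a new entry is first
     * charged against the slave's RES_MAC quota via mlx4_grant_resource().
     */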
1971static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1972{
1973        struct mlx4_priv *priv = mlx4_priv(dev);
1974        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1975        struct list_head *mac_list =
1976                &tracker->slave_list[slave].res_list[RES_MAC];
1977        struct mac_res *res, *tmp;
1978
1979        list_for_each_entry_safe(res, tmp, mac_list, list) {
1980                if (res->mac == mac && res->port == (u8) port) {
1981                        /* mac found. update ref count */
1982                        ++res->ref_count;
1983                        return 0;
1984                }
1985        }
1986
1987        if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1988                return -EINVAL;
1989        res = kzalloc(sizeof(*res), GFP_KERNEL);
1990        if (!res) {
1991                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1992                return -ENOMEM;
1993        }
1994        res->mac = mac;
1995        res->port = (u8) port;
1996        res->smac_index = smac_index;
1997        res->ref_count = 1;
1998        list_add_tail(&res->list,
1999                      &tracker->slave_list[slave].res_list[RES_MAC]);
2000        return 0;
2001}
2002
2003static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2004                               int port)
2005{
2006        struct mlx4_priv *priv = mlx4_priv(dev);
2007        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2008        struct list_head *mac_list =
2009                &tracker->slave_list[slave].res_list[RES_MAC];
2010        struct mac_res *res, *tmp;
2011
2012        list_for_each_entry_safe(res, tmp, mac_list, list) {
2013                if (res->mac == mac && res->port == (u8) port) {
2014                        if (!--res->ref_count) {
2015                                list_del(&res->list);
2016                                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2017                                kfree(res);
2018                        }
2019                        break;
2020                }
2021        }
2022}
2023
2024static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2025{
2026        struct mlx4_priv *priv = mlx4_priv(dev);
2027        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2028        struct list_head *mac_list =
2029                &tracker->slave_list[slave].res_list[RES_MAC];
2030        struct mac_res *res, *tmp;
2031        int i;
2032
2033        list_for_each_entry_safe(res, tmp, mac_list, list) {
2034                list_del(&res->list);
2035                /* unregister the mac once for each reference the slave took on it */
2036                for (i = 0; i < res->ref_count; i++)
2037                        __mlx4_unregister_mac(dev, res->port, res->mac);
2038                mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2039                kfree(res);
2040        }
2041}
2042
2043static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2044                         u64 in_param, u64 *out_param, int in_port)
2045{
2046        int err = -EINVAL;
2047        int port;
2048        u64 mac;
2049        u8 smac_index;
2050
2051        if (op != RES_OP_RESERVE_AND_MAP)
2052                return err;
2053
2054        port = !in_port ? get_param_l(out_param) : in_port;
2055        port = mlx4_slave_convert_port(
2056                        dev, slave, port);
2057
2058        if (port < 0)
2059                return -EINVAL;
2060        mac = in_param;
2061
2062        err = __mlx4_register_mac(dev, port, mac);
2063        if (err >= 0) {
2064                smac_index = err;
2065                set_param_l(out_param, err);
2066                err = 0;
2067        }
2068
2069        if (!err) {
2070                err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2071                if (err)
2072                        __mlx4_unregister_mac(dev, port, mac);
2073        }
2074        return err;
2075}
2076
2077static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2078                             int port, int vlan_index)
2079{
2080        struct mlx4_priv *priv = mlx4_priv(dev);
2081        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2082        struct list_head *vlan_list =
2083                &tracker->slave_list[slave].res_list[RES_VLAN];
2084        struct vlan_res *res, *tmp;
2085
2086        list_for_each_entry_safe(res, tmp, vlan_list, list) {
2087                if (res->vlan == vlan && res->port == (u8) port) {
2088                        /* vlan found. update ref count */
2089                        ++res->ref_count;
2090                        return 0;
2091                }
2092        }
2093
2094        if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2095                return -EINVAL;
2096        res = kzalloc(sizeof(*res), GFP_KERNEL);
2097        if (!res) {
2098                mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2099                return -ENOMEM;
2100        }
2101        res->vlan = vlan;
2102        res->port = (u8) port;
2103        res->vlan_index = vlan_index;
2104        res->ref_count = 1;
2105        list_add_tail(&res->list,
2106                      &tracker->slave_list[slave].res_list[RES_VLAN]);
2107        return 0;
2108}
2109
2110
2111static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2112                                int port)
2113{
2114        struct mlx4_priv *priv = mlx4_priv(dev);
2115        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2116        struct list_head *vlan_list =
2117                &tracker->slave_list[slave].res_list[RES_VLAN];
2118        struct vlan_res *res, *tmp;
2119
2120        list_for_each_entry_safe(res, tmp, vlan_list, list) {
2121                if (res->vlan == vlan && res->port == (u8) port) {
2122                        if (!--res->ref_count) {
2123                                list_del(&res->list);
2124                                mlx4_release_resource(dev, slave, RES_VLAN,
2125                                                      1, port);
2126                                kfree(res);
2127                        }
2128                        break;
2129                }
2130        }
2131}
2132
2133static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2134{
2135        struct mlx4_priv *priv = mlx4_priv(dev);
2136        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2137        struct list_head *vlan_list =
2138                &tracker->slave_list[slave].res_list[RES_VLAN];
2139        struct vlan_res *res, *tmp;
2140        int i;
2141
2142        list_for_each_entry_safe(res, tmp, vlan_list, list) {
2143                list_del(&res->list);
2144                /* unregister the vlan once for each reference the slave took on it */
2145                for (i = 0; i < res->ref_count; i++)
2146                        __mlx4_unregister_vlan(dev, res->port, res->vlan);
2147                mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2148                kfree(res);
2149        }
2150}
2151
2152static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2153                          u64 in_param, u64 *out_param, int in_port)
2154{
2155        struct mlx4_priv *priv = mlx4_priv(dev);
2156        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2157        int err;
2158        u16 vlan;
2159        int vlan_index;
2160        int port;
2161
2162        port = !in_port ? get_param_l(out_param) : in_port;
2163
2164        if (!port || op != RES_OP_RESERVE_AND_MAP)
2165                return -EINVAL;
2166
2167        port = mlx4_slave_convert_port(
2168                        dev, slave, port);
2169
2170        if (port < 0)
2171                return -EINVAL;
2172        /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2173        if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2174                slave_state[slave].old_vlan_api = true;
2175                return 0;
2176        }
2177
2178        vlan = (u16) in_param;
2179
2180        err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2181        if (!err) {
2182                set_param_l(out_param, (u32) vlan_index);
2183                err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2184                if (err)
2185                        __mlx4_unregister_vlan(dev, port, vlan);
2186        }
2187        return err;
2188}
2189
2190static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2191                             u64 in_param, u64 *out_param, int port)
2192{
2193        u32 index;
2194        int err;
2195
2196        if (op != RES_OP_RESERVE)
2197                return -EINVAL;
2198
2199        err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2200        if (err)
2201                return err;
2202
2203        err = __mlx4_counter_alloc(dev, &index);
2204        if (err) {
2205                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2206                return err;
2207        }
2208
2209        err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2210        if (err) {
2211                __mlx4_counter_free(dev, index);
2212                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2213        } else {
2214                set_param_l(out_param, index);
2215        }
2216
2217        return err;
2218}
2219
2220static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2221                           u64 in_param, u64 *out_param)
2222{
2223        u32 xrcdn;
2224        int err;
2225
2226        if (op != RES_OP_RESERVE)
2227                return -EINVAL;
2228
2229        err = __mlx4_xrcd_alloc(dev, &xrcdn);
2230        if (err)
2231                return err;
2232
2233        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2234        if (err)
2235                __mlx4_xrcd_free(dev, xrcdn);
2236        else
2237                set_param_l(out_param, xrcdn);
2238
2239        return err;
2240}
2241
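    /*
     * ALLOC_RES/FREE_RES dispatch: the resource type is encoded in the low
     * byte of vhcr->in_modifier, the port (for MAC/VLAN requests) in the
     * next byte, and vhcr->op_modifier selects the RES_OP_* operation.
     */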
2242int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2243                           struct mlx4_vhcr *vhcr,
2244                           struct mlx4_cmd_mailbox *inbox,
2245                           struct mlx4_cmd_mailbox *outbox,
2246                           struct mlx4_cmd_info *cmd)
2247{
2248        int err;
2249        int alop = vhcr->op_modifier;
2250
2251        switch (vhcr->in_modifier & 0xFF) {
2252        case RES_QP:
2253                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2254                                   vhcr->in_param, &vhcr->out_param);
2255                break;
2256
2257        case RES_MTT:
2258                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2259                                    vhcr->in_param, &vhcr->out_param);
2260                break;
2261
2262        case RES_MPT:
2263                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2264                                    vhcr->in_param, &vhcr->out_param);
2265                break;
2266
2267        case RES_CQ:
2268                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2269                                   vhcr->in_param, &vhcr->out_param);
2270                break;
2271
2272        case RES_SRQ:
2273                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2274                                    vhcr->in_param, &vhcr->out_param);
2275                break;
2276
2277        case RES_MAC:
2278                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2279                                    vhcr->in_param, &vhcr->out_param,
2280                                    (vhcr->in_modifier >> 8) & 0xFF);
2281                break;
2282
2283        case RES_VLAN:
2284                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2285                                     vhcr->in_param, &vhcr->out_param,
2286                                     (vhcr->in_modifier >> 8) & 0xFF);
2287                break;
2288
2289        case RES_COUNTER:
2290                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2291                                        vhcr->in_param, &vhcr->out_param, 0);
2292                break;
2293
2294        case RES_XRCD:
2295                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2296                                      vhcr->in_param, &vhcr->out_param);
2297                break;
2298
2299        default:
2300                err = -EINVAL;
2301                break;
2302        }
2303
2304        return err;
2305}
2306
2307static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2308                       u64 in_param)
2309{
2310        int err;
2311        int count;
2312        int base;
2313        int qpn;
2314
2315        switch (op) {
2316        case RES_OP_RESERVE:
2317                base = get_param_l(&in_param) & 0x7fffff;
2318                count = get_param_h(&in_param);
2319                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2320                if (err)
2321                        break;
2322                mlx4_release_resource(dev, slave, RES_QP, count, 0);
2323                __mlx4_qp_release_range(dev, base, count);
2324                break;
2325        case RES_OP_MAP_ICM:
2326                qpn = get_param_l(&in_param) & 0x7fffff;
2327                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2328                                           NULL, 0);
2329                if (err)
2330                        return err;
2331
2332                if (!fw_reserved(dev, qpn))
2333                        __mlx4_qp_free_icm(dev, qpn);
2334
2335                res_end_move(dev, slave, RES_QP, qpn);
2336
2337                if (valid_reserved(dev, slave, qpn))
2338                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2339                break;
2340        default:
2341                err = -EINVAL;
2342                break;
2343        }
2344        return err;
2345}
2346
2347static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2348                        u64 in_param, u64 *out_param)
2349{
2350        int err = -EINVAL;
2351        int base;
2352        int order;
2353
2354        if (op != RES_OP_RESERVE_AND_MAP)
2355                return err;
2356
2357        base = get_param_l(&in_param);
2358        order = get_param_h(&in_param);
2359        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2360        if (!err) {
2361                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2362                __mlx4_free_mtt_range(dev, base, order);
2363        }
2364        return err;
2365}
2366
2367static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2368                        u64 in_param)
2369{
2370        int err = -EINVAL;
2371        int index;
2372        int id;
2373        struct res_mpt *mpt;
2374
2375        switch (op) {
2376        case RES_OP_RESERVE:
2377                index = get_param_l(&in_param);
2378                id = index & mpt_mask(dev);
2379                err = get_res(dev, slave, id, RES_MPT, &mpt);
2380                if (err)
2381                        break;
2382                index = mpt->key;
2383                put_res(dev, slave, id, RES_MPT);
2384
2385                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2386                if (err)
2387                        break;
2388                mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2389                __mlx4_mpt_release(dev, index);
2390                break;
2391        case RES_OP_MAP_ICM:
2392                index = get_param_l(&in_param);
2393                id = index & mpt_mask(dev);
2394                err = mr_res_start_move_to(dev, slave, id,
2395                                           RES_MPT_RESERVED, &mpt);
2396                if (err)
2397                        return err;
2398
2399                __mlx4_mpt_free_icm(dev, mpt->key);
2400                res_end_move(dev, slave, RES_MPT, id);
2401                break;
2402        default:
2403                err = -EINVAL;
2404                break;
2405        }
2406        return err;
2407}
2408
2409static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2410                       u64 in_param, u64 *out_param)
2411{
2412        int cqn;
2413        int err;
2414
2415        switch (op) {
2416        case RES_OP_RESERVE_AND_MAP:
2417                cqn = get_param_l(&in_param);
2418                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2419                if (err)
2420                        break;
2421
2422                mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2423                __mlx4_cq_free_icm(dev, cqn);
2424                break;
2425
2426        default:
2427                err = -EINVAL;
2428                break;
2429        }
2430
2431        return err;
2432}
2433
2434static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2435                        u64 in_param, u64 *out_param)
2436{
2437        int srqn;
2438        int err;
2439
2440        switch (op) {
2441        case RES_OP_RESERVE_AND_MAP:
2442                srqn = get_param_l(&in_param);
2443                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2444                if (err)
2445                        break;
2446
2447                mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2448                __mlx4_srq_free_icm(dev, srqn);
2449                break;
2450
2451        default:
2452                err = -EINVAL;
2453                break;
2454        }
2455
2456        return err;
2457}
2458
2459static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2460                            u64 in_param, u64 *out_param, int in_port)
2461{
2462        int port;
2463        int err = 0;
2464
2465        switch (op) {
2466        case RES_OP_RESERVE_AND_MAP:
2467                port = !in_port ? get_param_l(out_param) : in_port;
2468                port = mlx4_slave_convert_port(
2469                                dev, slave, port);
2470
2471                if (port < 0)
2472                        return -EINVAL;
2473                mac_del_from_slave(dev, slave, in_param, port);
2474                __mlx4_unregister_mac(dev, port, in_param);
2475                break;
2476        default:
2477                err = -EINVAL;
2478                break;
2479        }
2480
2481        return err;
2482
2483}
2484
2485static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2486                            u64 in_param, u64 *out_param, int port)
2487{
2488        struct mlx4_priv *priv = mlx4_priv(dev);
2489        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2490        int err = 0;
2491
2492        port = mlx4_slave_convert_port(
2493                        dev, slave, port);
2494
2495        if (port < 0)
2496                return -EINVAL;
2497        switch (op) {
2498        case RES_OP_RESERVE_AND_MAP:
2499                if (slave_state[slave].old_vlan_api)
2500                        return 0;
2501                if (!port)
2502                        return -EINVAL;
2503                vlan_del_from_slave(dev, slave, in_param, port);
2504                __mlx4_unregister_vlan(dev, port, in_param);
2505                break;
2506        default:
2507                err = -EINVAL;
2508                break;
2509        }
2510
2511        return err;
2512}
2513
2514static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2515                            u64 in_param, u64 *out_param)
2516{
2517        int index;
2518        int err;
2519
2520        if (op != RES_OP_RESERVE)
2521                return -EINVAL;
2522
2523        index = get_param_l(&in_param);
2524        if (index == MLX4_SINK_COUNTER_INDEX(dev))
2525                return 0;
2526
2527        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2528        if (err)
2529                return err;
2530
2531        __mlx4_counter_free(dev, index);
2532        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2533
2534        return err;
2535}
2536
2537static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2538                          u64 in_param, u64 *out_param)
2539{
2540        int xrcdn;
2541        int err;
2542
2543        if (op != RES_OP_RESERVE)
2544                return -EINVAL;
2545
2546        xrcdn = get_param_l(&in_param);
2547        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2548        if (err)
2549                return err;
2550
2551        __mlx4_xrcd_free(dev, xrcdn);
2552
2553        return err;
2554}
2555
2556int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2557                          struct mlx4_vhcr *vhcr,
2558                          struct mlx4_cmd_mailbox *inbox,
2559                          struct mlx4_cmd_mailbox *outbox,
2560                          struct mlx4_cmd_info *cmd)
2561{
2562        int err = -EINVAL;
2563        int alop = vhcr->op_modifier;
2564
2565        switch (vhcr->in_modifier & 0xFF) {
2566        case RES_QP:
2567                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2568                                  vhcr->in_param);
2569                break;
2570
2571        case RES_MTT:
2572                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2573                                   vhcr->in_param, &vhcr->out_param);
2574                break;
2575
2576        case RES_MPT:
2577                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2578                                   vhcr->in_param);
2579                break;
2580
2581        case RES_CQ:
2582                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2583                                  vhcr->in_param, &vhcr->out_param);
2584                break;
2585
2586        case RES_SRQ:
2587                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2588                                   vhcr->in_param, &vhcr->out_param);
2589                break;
2590
2591        case RES_MAC:
2592                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2593                                   vhcr->in_param, &vhcr->out_param,
2594                                   (vhcr->in_modifier >> 8) & 0xFF);
2595                break;
2596
2597        case RES_VLAN:
2598                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2599                                    vhcr->in_param, &vhcr->out_param,
2600                                    (vhcr->in_modifier >> 8) & 0xFF);
2601                break;
2602
2603        case RES_COUNTER:
2604                err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2605                                       vhcr->in_param, &vhcr->out_param);
2606                break;
2607
2608        case RES_XRCD:
2609                err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2610                                     vhcr->in_param, &vhcr->out_param);
2611                break;
2612        default:
2613                break;
2614        }
2615        return err;
2616}
2617
2618/* ugly but other choices are uglier */
2619static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2620{
2621        return (be32_to_cpu(mpt->flags) >> 9) & 1;
2622}
2623
2624static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2625{
2626        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2627}
2628
2629static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2630{
2631        return be32_to_cpu(mpt->mtt_sz);
2632}
2633
2634static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2635{
2636        return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2637}
2638
2639static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2640{
2641        return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2642}
2643
2644static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2645{
2646        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2647}
2648
2649static int mr_is_region(struct mlx4_mpt_entry *mpt)
2650{
2651        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2652}
2653
2654static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2655{
2656        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2657}
2658
2659static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2660{
2661        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2662}
2663
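    /*
     * Number of pages (one MTT entry per page) a QP's work queues need:
     * the send queue spans 2^(log_sq_size + log_sq_stride + 4) bytes
     * (strides are in 16-byte units, hence the "+ 4"), the receive queue
     * likewise unless the QP uses an SRQ, RSS or XRC (no RQ in that case),
     * and the total, adjusted for the page offset, is rounded up to a
     * power-of-two count of 2^page_shift byte pages.
     */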
2664static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2665{
2666        int page_shift = (qpc->log_page_size & 0x3f) + 12;
2667        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2668        int log_sq_stride = qpc->sq_size_stride & 7;
2669        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2670        int log_rq_stride = qpc->rq_size_stride & 7;
2671        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2672        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2673        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2674        int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2675        int sq_size;
2676        int rq_size;
2677        int total_pages;
2678        int total_mem;
2679        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2680
2681        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2682        rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2683        total_mem = sq_size + rq_size;
2684        total_pages =
2685                roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2686                                   page_shift);
2687
2688        return total_pages;
2689}
2690
2691static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2692                           int size, struct res_mtt *mtt)
2693{
2694        int res_start = mtt->com.res_id;
2695        int res_size = (1 << mtt->order);
2696
2697        if (start < res_start || start + size > res_start + res_size)
2698                return -EPERM;
2699        return 0;
2700}
2701
2702int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2703                           struct mlx4_vhcr *vhcr,
2704                           struct mlx4_cmd_mailbox *inbox,
2705                           struct mlx4_cmd_mailbox *outbox,
2706                           struct mlx4_cmd_info *cmd)
2707{
2708        int err;
2709        int index = vhcr->in_modifier;
2710        struct res_mtt *mtt;
2711        struct res_mpt *mpt;
2712        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2713        int phys;
2714        int id;
2715        u32 pd;
2716        int pd_slave;
2717
2718        id = index & mpt_mask(dev);
2719        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2720        if (err)
2721                return err;
2722
2723        /* Disable memory windows for VFs. */
2724        if (!mr_is_region(inbox->buf)) {
2725                err = -EPERM;
2726                goto ex_abort;
2727        }
2728
2729        /* Make sure that the PD bits related to the slave id are zeros. */
2730        pd = mr_get_pd(inbox->buf);
2731        pd_slave = (pd >> 17) & 0x7f;
2732        if (pd_slave != 0 && --pd_slave != slave) {
2733                err = -EPERM;
2734                goto ex_abort;
2735        }
2736
2737        if (mr_is_fmr(inbox->buf)) {
2738                /* FMR and Bind Enable are forbidden in slave devices. */
2739                if (mr_is_bind_enabled(inbox->buf)) {
2740                        err = -EPERM;
2741                        goto ex_abort;
2742                }
2743                /* FMR and Memory Windows are also forbidden. */
2744                if (!mr_is_region(inbox->buf)) {
2745                        err = -EPERM;
2746                        goto ex_abort;
2747                }
2748        }
2749
2750        phys = mr_phys_mpt(inbox->buf);
2751        if (!phys) {
2752                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2753                if (err)
2754                        goto ex_abort;
2755
2756                err = check_mtt_range(dev, slave, mtt_base,
2757                                      mr_get_mtt_size(inbox->buf), mtt);
2758                if (err)
2759                        goto ex_put;
2760
2761                mpt->mtt = mtt;
2762        }
2763
2764        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2765        if (err)
2766                goto ex_put;
2767
2768        if (!phys) {
2769                atomic_inc(&mtt->ref_count);
2770                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2771        }
2772
2773        res_end_move(dev, slave, RES_MPT, id);
2774        return 0;
2775
2776ex_put:
2777        if (!phys)
2778                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2779ex_abort:
2780        res_abort_move(dev, slave, RES_MPT, id);
2781
2782        return err;
2783}
2784
2785int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2786                           struct mlx4_vhcr *vhcr,
2787                           struct mlx4_cmd_mailbox *inbox,
2788                           struct mlx4_cmd_mailbox *outbox,
2789                           struct mlx4_cmd_info *cmd)
2790{
2791        int err;
2792        int index = vhcr->in_modifier;
2793        struct res_mpt *mpt;
2794        int id;
2795
2796        id = index & mpt_mask(dev);
2797        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2798        if (err)
2799                return err;
2800
2801        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2802        if (err)
2803                goto ex_abort;
2804
2805        if (mpt->mtt)
2806                atomic_dec(&mpt->mtt->ref_count);
2807
2808        res_end_move(dev, slave, RES_MPT, id);
2809        return 0;
2810
2811ex_abort:
2812        res_abort_move(dev, slave, RES_MPT, id);
2813
2814        return err;
2815}
2816
2817int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2818                           struct mlx4_vhcr *vhcr,
2819                           struct mlx4_cmd_mailbox *inbox,
2820                           struct mlx4_cmd_mailbox *outbox,
2821                           struct mlx4_cmd_info *cmd)
2822{
2823        int err;
2824        int index = vhcr->in_modifier;
2825        struct res_mpt *mpt;
2826        int id;
2827
2828        id = index & mpt_mask(dev);
2829        err = get_res(dev, slave, id, RES_MPT, &mpt);
2830        if (err)
2831                return err;
2832
2833        if (mpt->com.from_state == RES_MPT_MAPPED) {
2834                /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2835                 * that, the VF must read the MPT. But since the MPT entry memory is not
2836                 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2837                 * entry contents. To guarantee that the MPT cannot be changed, the driver
2838                 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2839                 * ownership following the change. The change here allows the VF to
2840                 * perform QUERY_MPT also when the entry is in SW ownership.
2841                 */
2842                struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2843                                        &mlx4_priv(dev)->mr_table.dmpt_table,
2844                                        mpt->key, NULL);
2845
2846                if (!mpt_entry || !outbox->buf) {
2847                        err = -EINVAL;
2848                        goto out;
2849                }
2850
2851                memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2852
2853                err = 0;
2854        } else if (mpt->com.from_state == RES_MPT_HW) {
2855                err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2856        } else {
2857                err = -EBUSY;
2858                goto out;
2859        }
2860
2861
2862out:
2863        put_res(dev, slave, id, RES_MPT);
2864        return err;
2865}
2866
2867static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2868{
2869        return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2870}
2871
2872static int qp_get_scqn(struct mlx4_qp_context *qpc)
2873{
2874        return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2875}
2876
2877static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2878{
2879        return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2880}
2881
2882static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2883                                  struct mlx4_qp_context *context)
2884{
2885        u32 qpn = vhcr->in_modifier & 0xffffff;
2886        u32 qkey = 0;
2887
2888        if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2889                return;
2890
2891        /* adjust qkey in qp context */
2892        context->qkey = cpu_to_be32(qkey);
2893}
2894
2895static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2896                                 struct mlx4_qp_context *qpc,
2897                                 struct mlx4_cmd_mailbox *inbox);
2898
2899int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2900                             struct mlx4_vhcr *vhcr,
2901                             struct mlx4_cmd_mailbox *inbox,
2902                             struct mlx4_cmd_mailbox *outbox,
2903                             struct mlx4_cmd_info *cmd)
2904{
2905        int err;
2906        int qpn = vhcr->in_modifier & 0x7fffff;
2907        struct res_mtt *mtt;
2908        struct res_qp *qp;
2909        struct mlx4_qp_context *qpc = inbox->buf + 8;
2910        int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2911        int mtt_size = qp_get_mtt_size(qpc);
2912        struct res_cq *rcq;
2913        struct res_cq *scq;
2914        int rcqn = qp_get_rcqn(qpc);
2915        int scqn = qp_get_scqn(qpc);
2916        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2917        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2918        struct res_srq *srq;
2919        int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2920
2921        err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2922        if (err)
2923                return err;
2924
2925        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2926        if (err)
2927                return err;
2928        qp->local_qpn = local_qpn;
2929        qp->sched_queue = 0;
2930        qp->param3 = 0;
2931        qp->vlan_control = 0;
2932        qp->fvl_rx = 0;
2933        qp->pri_path_fl = 0;
2934        qp->vlan_index = 0;
2935        qp->feup = 0;
2936        qp->qpc_flags = be32_to_cpu(qpc->flags);
2937
2938        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2939        if (err)
2940                goto ex_abort;
2941
2942        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2943        if (err)
2944                goto ex_put_mtt;
2945
2946        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2947        if (err)
2948                goto ex_put_mtt;
2949
2950        if (scqn != rcqn) {
2951                err = get_res(dev, slave, scqn, RES_CQ, &scq);
2952                if (err)
2953                        goto ex_put_rcq;
2954        } else
2955                scq = rcq;
2956
2957        if (use_srq) {
2958                err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2959                if (err)
2960                        goto ex_put_scq;
2961        }
2962
2963        adjust_proxy_tun_qkey(dev, vhcr, qpc);
2964        update_pkey_index(dev, slave, inbox);
2965        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2966        if (err)
2967                goto ex_put_srq;
2968        atomic_inc(&mtt->ref_count);
2969        qp->mtt = mtt;
2970        atomic_inc(&rcq->ref_count);
2971        qp->rcq = rcq;
2972        atomic_inc(&scq->ref_count);
2973        qp->scq = scq;
2974
2975        if (scqn != rcqn)
2976                put_res(dev, slave, scqn, RES_CQ);
2977
2978        if (use_srq) {
2979                atomic_inc(&srq->ref_count);
2980                put_res(dev, slave, srqn, RES_SRQ);
2981                qp->srq = srq;
2982        }
2983        put_res(dev, slave, rcqn, RES_CQ);
2984        put_res(dev, slave, mtt_base, RES_MTT);
2985        res_end_move(dev, slave, RES_QP, qpn);
2986
2987        return 0;
2988
2989ex_put_srq:
2990        if (use_srq)
2991                put_res(dev, slave, srqn, RES_SRQ);
2992ex_put_scq:
2993        if (scqn != rcqn)
2994                put_res(dev, slave, scqn, RES_CQ);
2995ex_put_rcq:
2996        put_res(dev, slave, rcqn, RES_CQ);
2997ex_put_mtt:
2998        put_res(dev, slave, mtt_base, RES_MTT);
2999ex_abort:
3000        res_abort_move(dev, slave, RES_QP, qpn);
3001
3002        return err;
3003}
3004
3005static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3006{
3007        return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3008}
3009
3010static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3011{
3012        int log_eq_size = eqc->log_eq_size & 0x1f;
3013        int page_shift = (eqc->log_page_size & 0x3f) + 12;
3014
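            /* Each EQE is 32 bytes (hence the "+ 5"), so the EQ occupies
             * 2^(log_eq_size + 5) bytes; return how many pages of
             * 2^page_shift bytes that spans, but never less than one.
             * E.g. log_eq_size = 10 (1024 EQEs) with 4K pages gives
             * 1 << (10 + 5 - 12) = 8 MTT entries.
             */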
3015        if (log_eq_size + 5 < page_shift)
3016                return 1;
3017
3018        return 1 << (log_eq_size + 5 - page_shift);
3019}
3020
3021static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3022{
3023        return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3024}
3025
3026static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3027{
3028        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3029        int page_shift = (cqc->log_page_size & 0x3f) + 12;
3030
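            /* Same calculation as for EQs: a CQE is 32 bytes, so the CQ covers
             * 2^(log_cq_size + 5) bytes, rounded up to at least one page of
             * 2^page_shift bytes.
             */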
3031        if (log_cq_size + 5 < page_shift)
3032                return 1;
3033
3034        return 1 << (log_cq_size + 5 - page_shift);
3035}
3036
3037int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3038                          struct mlx4_vhcr *vhcr,
3039                          struct mlx4_cmd_mailbox *inbox,
3040                          struct mlx4_cmd_mailbox *outbox,
3041                          struct mlx4_cmd_info *cmd)
3042{
3043        int err;
3044        int eqn = vhcr->in_modifier;
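            /* EQ numbers are per slave, so the resource tracker keys EQs by
             * (slave << 10) | eqn to keep the per-slave ranges disjoint.
             */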
3045        int res_id = (slave << 10) | eqn;
3046        struct mlx4_eq_context *eqc = inbox->buf;
3047        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3048        int mtt_size = eq_get_mtt_size(eqc);
3049        struct res_eq *eq;
3050        struct res_mtt *mtt;
3051
3052        err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3053        if (err)
3054                return err;
3055        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3056        if (err)
3057                goto out_add;
3058
3059        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3060        if (err)
3061                goto out_move;
3062
3063        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3064        if (err)
3065                goto out_put;
3066
3067        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3068        if (err)
3069                goto out_put;
3070
3071        atomic_inc(&mtt->ref_count);
3072        eq->mtt = mtt;
3073        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3074        res_end_move(dev, slave, RES_EQ, res_id);
3075        return 0;
3076
3077out_put:
3078        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3079out_move:
3080        res_abort_move(dev, slave, RES_EQ, res_id);
3081out_add:
3082        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3083        return err;
3084}
3085
3086int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3087                            struct mlx4_vhcr *vhcr,
3088                            struct mlx4_cmd_mailbox *inbox,
3089                            struct mlx4_cmd_mailbox *outbox,
3090                            struct mlx4_cmd_info *cmd)
3091{
3092        int err;
3093        u8 get = vhcr->op_modifier;
3094
3095        if (get != 1)
3096                return -EPERM;
3097
3098        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3099
3100        return err;
3101}
3102
3103static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3104                              int len, struct res_mtt **res)
3105{
3106        struct mlx4_priv *priv = mlx4_priv(dev);
3107        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3108        struct res_mtt *mtt;
3109        int err = -EINVAL;
3110
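            /* Walk this slave's MTT ranges and pick the first one that fully
             * contains [start, start + len).  The range is marked busy under the
             * tracker lock; the caller must release it again with put_res().
             */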
3111        spin_lock_irq(mlx4_tlock(dev));
3112        list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3113                            com.list) {
3114                if (!check_mtt_range(dev, slave, start, len, mtt)) {
3115                        *res = mtt;
3116                        mtt->com.from_state = mtt->com.state;
3117                        mtt->com.state = RES_MTT_BUSY;
3118                        err = 0;
3119                        break;
3120                }
3121        }
3122        spin_unlock_irq(mlx4_tlock(dev));
3123
3124        return err;
3125}
3126
3127static int verify_qp_parameters(struct mlx4_dev *dev,
3128                                struct mlx4_vhcr *vhcr,
3129                                struct mlx4_cmd_mailbox *inbox,
3130                                enum qp_transition transition, u8 slave)
3131{
3132        u32                     qp_type;
3133        u32                     qpn;
3134        struct mlx4_qp_context  *qp_ctx;
3135        enum mlx4_qp_optpar     optpar;
3136        int port;
3137        int num_gids;
3138
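            /* The mailbox starts with the 32-bit optparam mask; the QP context
             * itself begins at offset 8.
             */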
3139        qp_ctx  = inbox->buf + 8;
3140        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3141        optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
3142
3143        if (slave != mlx4_master_func_num(dev)) {
3144                qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3145                /* setting QP rate-limit is disallowed for VFs */
3146                if (qp_ctx->rate_limit_params)
3147                        return -EPERM;
3148        }
3149
3150        switch (qp_type) {
3151        case MLX4_QP_ST_RC:
3152        case MLX4_QP_ST_XRC:
3153        case MLX4_QP_ST_UC:
3154                switch (transition) {
3155                case QP_TRANS_INIT2RTR:
3156                case QP_TRANS_RTR2RTS:
3157                case QP_TRANS_RTS2RTS:
3158                case QP_TRANS_SQD2SQD:
3159                case QP_TRANS_SQD2RTS:
3160                        if (slave != mlx4_master_func_num(dev)) {
3161                                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3162                                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3163                                        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3164                                                num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3165                                        else
3166                                                num_gids = 1;
3167                                        if (qp_ctx->pri_path.mgid_index >= num_gids)
3168                                                return -EINVAL;
3169                                }
3170                                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3171                                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3172                                        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3173                                                num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3174                                        else
3175                                                num_gids = 1;
3176                                        if (qp_ctx->alt_path.mgid_index >= num_gids)
3177                                                return -EINVAL;
3178                                }
3179                        }
3180                        break;
3181                default:
3182                        break;
3183                }
3184                break;
3185
3186        case MLX4_QP_ST_MLX:
3187                qpn = vhcr->in_modifier & 0x7fffff;
3188                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3189                if (transition == QP_TRANS_INIT2RTR &&
3190                    slave != mlx4_master_func_num(dev) &&
3191                    mlx4_is_qp_reserved(dev, qpn) &&
3192                    !mlx4_vf_smi_enabled(dev, slave, port)) {
3193                        /* only enabled VFs may create MLX proxy QPs */
3194                        mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3195                                 __func__, slave, port);
3196                        return -EPERM;
3197                }
3198                break;
3199
3200        default:
3201                break;
3202        }
3203
3204        return 0;
3205}
3206
3207int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3208                           struct mlx4_vhcr *vhcr,
3209                           struct mlx4_cmd_mailbox *inbox,
3210                           struct mlx4_cmd_mailbox *outbox,
3211                           struct mlx4_cmd_info *cmd)
3212{
3213        struct mlx4_mtt mtt;
3214        __be64 *page_list = inbox->buf;
3215        u64 *pg_list = (u64 *)page_list;
3216        int i;
3217        struct res_mtt *rmtt = NULL;
3218        int start = be64_to_cpu(page_list[0]);
3219        int npages = vhcr->in_modifier;
3220        int err;
3221
3222        err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3223        if (err)
3224                return err;
3225
3226        /* Call the SW implementation of write_mtt:
3227         * - Prepare a dummy mtt struct
3228         * - Translate inbox contents to simple addresses in host endianness */
3229        mtt.offset = 0;  /* TBD: the offset is not handled properly, but it
3230                            is not actually used here */
3231        mtt.order = 0;
3232        mtt.page_shift = 0;
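            /* The mailbox puts the MTT start address in the first 64-bit word;
             * the page addresses to write begin at index 2.  Convert them to
             * host endianness and clear the low (present) bit before handing
             * them to the SW implementation.
             */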
3233        for (i = 0; i < npages; ++i)
3234                pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3235
3236        err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3237                               ((u64 *)page_list + 2));
3238
3239        if (rmtt)
3240                put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3241
3242        return err;
3243}
3244
3245int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3246                          struct mlx4_vhcr *vhcr,
3247                          struct mlx4_cmd_mailbox *inbox,
3248                          struct mlx4_cmd_mailbox *outbox,
3249                          struct mlx4_cmd_info *cmd)
3250{
3251        int eqn = vhcr->in_modifier;
3252        int res_id = eqn | (slave << 10);
3253        struct res_eq *eq;
3254        int err;
3255
3256        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3257        if (err)
3258                return err;
3259
3260        err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3261        if (err)
3262                goto ex_abort;
3263
3264        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3265        if (err)
3266                goto ex_put;
3267
3268        atomic_dec(&eq->mtt->ref_count);
3269        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3270        res_end_move(dev, slave, RES_EQ, res_id);
3271        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3272
3273        return 0;
3274
3275ex_put:
3276        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3277ex_abort:
3278        res_abort_move(dev, slave, RES_EQ, res_id);
3279
3280        return err;
3281}
3282
3283int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3284{
3285        struct mlx4_priv *priv = mlx4_priv(dev);
3286        struct mlx4_slave_event_eq_info *event_eq;
3287        struct mlx4_cmd_mailbox *mailbox;
3288        u32 in_modifier = 0;
3289        int err;
3290        int res_id;
3291        struct res_eq *req;
3292
3293        if (!priv->mfunc.master.slave_state)
3294                return -EINVAL;
3295
3296        /* check that the slave is valid, is not the PF, and is active */
3297        if (slave < 0 || slave > dev->persist->num_vfs ||
3298            slave == dev->caps.function ||
3299            !priv->mfunc.master.slave_state[slave].active)
3300                return 0;
3301
3302        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3303
3304        /* Create the event only if the slave is registered */
3305        if (event_eq->eqn < 0)
3306                return 0;
3307
3308        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3309        res_id = (slave << 10) | event_eq->eqn;
3310        err = get_res(dev, slave, res_id, RES_EQ, &req);
3311        if (err)
3312                goto unlock;
3313
3314        if (req->com.from_state != RES_EQ_HW) {
3315                err = -EINVAL;
3316                goto put;
3317        }
3318
3319        mailbox = mlx4_alloc_cmd_mailbox(dev);
3320        if (IS_ERR(mailbox)) {
3321                err = PTR_ERR(mailbox);
3322                goto put;
3323        }
3324
3325        if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3326                ++event_eq->token;
3327                eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3328        }
3329
3330        memcpy(mailbox->buf, (u8 *) eqe, 28);
3331
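            /* in_modifier encodes the target: the slave number in the low byte
             * and the slave's EQ number in bits 16-25.
             */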
3332        in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3333
3334        err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3335                       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3336                       MLX4_CMD_NATIVE);
3337
3338        put_res(dev, slave, res_id, RES_EQ);
3339        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3340        mlx4_free_cmd_mailbox(dev, mailbox);
3341        return err;
3342
3343put:
3344        put_res(dev, slave, res_id, RES_EQ);
3345
3346unlock:
3347        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3348        return err;
3349}
3350
3351int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3352                          struct mlx4_vhcr *vhcr,
3353                          struct mlx4_cmd_mailbox *inbox,
3354                          struct mlx4_cmd_mailbox *outbox,
3355                          struct mlx4_cmd_info *cmd)
3356{
3357        int eqn = vhcr->in_modifier;
3358        int res_id = eqn | (slave << 10);
3359        struct res_eq *eq;
3360        int err;
3361
3362        err = get_res(dev, slave, res_id, RES_EQ, &eq);
3363        if (err)
3364                return err;
3365
3366        if (eq->com.from_state != RES_EQ_HW) {
3367                err = -EINVAL;
3368                goto ex_put;
3369        }
3370
3371        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3372
3373ex_put:
3374        put_res(dev, slave, res_id, RES_EQ);
3375        return err;
3376}
3377
3378int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3379                          struct mlx4_vhcr *vhcr,
3380                          struct mlx4_cmd_mailbox *inbox,
3381                          struct mlx4_cmd_mailbox *outbox,
3382                          struct mlx4_cmd_info *cmd)
3383{
3384        int err;
3385        int cqn = vhcr->in_modifier;
3386        struct mlx4_cq_context *cqc = inbox->buf;
3387        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3388        struct res_cq *cq = NULL;
3389        struct res_mtt *mtt;
3390
3391        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3392        if (err)
3393                return err;
3394        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3395        if (err)
3396                goto out_move;
3397        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3398        if (err)
3399                goto out_put;
3400        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3401        if (err)
3402                goto out_put;
3403        atomic_inc(&mtt->ref_count);
3404        cq->mtt = mtt;
3405        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3406        res_end_move(dev, slave, RES_CQ, cqn);
3407        return 0;
3408
3409out_put:
3410        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3411out_move:
3412        res_abort_move(dev, slave, RES_CQ, cqn);
3413        return err;
3414}
3415
3416int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3417                          struct mlx4_vhcr *vhcr,
3418                          struct mlx4_cmd_mailbox *inbox,
3419                          struct mlx4_cmd_mailbox *outbox,
3420                          struct mlx4_cmd_info *cmd)
3421{
3422        int err;
3423        int cqn = vhcr->in_modifier;
3424        struct res_cq *cq = NULL;
3425
3426        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3427        if (err)
3428                return err;
3429        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3430        if (err)
3431                goto out_move;
3432        atomic_dec(&cq->mtt->ref_count);
3433        res_end_move(dev, slave, RES_CQ, cqn);
3434        return 0;
3435
3436out_move:
3437        res_abort_move(dev, slave, RES_CQ, cqn);
3438        return err;
3439}
3440
3441int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3442                          struct mlx4_vhcr *vhcr,
3443                          struct mlx4_cmd_mailbox *inbox,
3444                          struct mlx4_cmd_mailbox *outbox,
3445                          struct mlx4_cmd_info *cmd)
3446{
3447        int cqn = vhcr->in_modifier;
3448        struct res_cq *cq;
3449        int err;
3450
3451        err = get_res(dev, slave, cqn, RES_CQ, &cq);
3452        if (err)
3453                return err;
3454
3455        if (cq->com.from_state != RES_CQ_HW)
3456                goto ex_put;
3457
3458        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3459ex_put:
3460        put_res(dev, slave, cqn, RES_CQ);
3461
3462        return err;
3463}
3464
3465static int handle_resize(struct mlx4_dev *dev, int slave,
3466                         struct mlx4_vhcr *vhcr,
3467                         struct mlx4_cmd_mailbox *inbox,
3468                         struct mlx4_cmd_mailbox *outbox,
3469                         struct mlx4_cmd_info *cmd,
3470                         struct res_cq *cq)
3471{
3472        int err;
3473        struct res_mtt *orig_mtt;
3474        struct res_mtt *mtt;
3475        struct mlx4_cq_context *cqc = inbox->buf;
3476        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3477
3478        err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3479        if (err)
3480                return err;
3481
3482        if (orig_mtt != cq->mtt) {
3483                err = -EINVAL;
3484                goto ex_put;
3485        }
3486
3487        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3488        if (err)
3489                goto ex_put;
3490
3491        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3492        if (err)
3493                goto ex_put1;
3494        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3495        if (err)
3496                goto ex_put1;
3497        atomic_dec(&orig_mtt->ref_count);
3498        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3499        atomic_inc(&mtt->ref_count);
3500        cq->mtt = mtt;
3501        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3502        return 0;
3503
3504ex_put1:
3505        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3506ex_put:
3507        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3508
3509        return err;
3510
3511}
3512
3513int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3514                           struct mlx4_vhcr *vhcr,
3515                           struct mlx4_cmd_mailbox *inbox,
3516                           struct mlx4_cmd_mailbox *outbox,
3517                           struct mlx4_cmd_info *cmd)
3518{
3519        int cqn = vhcr->in_modifier;
3520        struct res_cq *cq;
3521        int err;
3522
3523        err = get_res(dev, slave, cqn, RES_CQ, &cq);
3524        if (err)
3525                return err;
3526
3527        if (cq->com.from_state != RES_CQ_HW)
3528                goto ex_put;
3529
3530        if (vhcr->op_modifier == 0) {
3531                err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3532                goto ex_put;
3533        }
3534
3535        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3536ex_put:
3537        put_res(dev, slave, cqn, RES_CQ);
3538
3539        return err;
3540}
3541
3542static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3543{
3544        int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3545        int log_rq_stride = srqc->logstride & 7;
3546        int page_shift = (srqc->log_page_size & 0x3f) + 12;
3547
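            /* An SRQ WQE stride is 2^(log_rq_stride + 4) bytes, so the queue
             * spans 2^(log_srq_size + log_rq_stride + 4) bytes; return the
             * number of pages that covers, with a minimum of one.
             * E.g. 1024 WQEs of 64 bytes with 4K pages needs
             * 1 << (10 + 2 + 4 - 12) = 16 MTT entries.
             */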
3548        if (log_srq_size + log_rq_stride + 4 < page_shift)
3549                return 1;
3550
3551        return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3552}
3553
3554int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3555                           struct mlx4_vhcr *vhcr,
3556                           struct mlx4_cmd_mailbox *inbox,
3557                           struct mlx4_cmd_mailbox *outbox,
3558                           struct mlx4_cmd_info *cmd)
3559{
3560        int err;
3561        int srqn = vhcr->in_modifier;
3562        struct res_mtt *mtt;
3563        struct res_srq *srq = NULL;
3564        struct mlx4_srq_context *srqc = inbox->buf;
3565        int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3566
3567        if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3568                return -EINVAL;
3569
3570        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3571        if (err)
3572                return err;
3573        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3574        if (err)
3575                goto ex_abort;
3576        err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3577                              mtt);
3578        if (err)
3579                goto ex_put_mtt;
3580
3581        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3582        if (err)
3583                goto ex_put_mtt;
3584
3585        atomic_inc(&mtt->ref_count);
3586        srq->mtt = mtt;
3587        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3588        res_end_move(dev, slave, RES_SRQ, srqn);
3589        return 0;
3590
3591ex_put_mtt:
3592        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3593ex_abort:
3594        res_abort_move(dev, slave, RES_SRQ, srqn);
3595
3596        return err;
3597}
3598
3599int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3600                           struct mlx4_vhcr *vhcr,
3601                           struct mlx4_cmd_mailbox *inbox,
3602                           struct mlx4_cmd_mailbox *outbox,
3603                           struct mlx4_cmd_info *cmd)
3604{
3605        int err;
3606        int srqn = vhcr->in_modifier;
3607        struct res_srq *srq = NULL;
3608
3609        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3610        if (err)
3611                return err;
3612        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3613        if (err)
3614                goto ex_abort;
3615        atomic_dec(&srq->mtt->ref_count);
3616        if (srq->cq)
3617                atomic_dec(&srq->cq->ref_count);
3618        res_end_move(dev, slave, RES_SRQ, srqn);
3619
3620        return 0;
3621
3622ex_abort:
3623        res_abort_move(dev, slave, RES_SRQ, srqn);
3624
3625        return err;
3626}
3627
3628int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3629                           struct mlx4_vhcr *vhcr,
3630                           struct mlx4_cmd_mailbox *inbox,
3631                           struct mlx4_cmd_mailbox *outbox,
3632                           struct mlx4_cmd_info *cmd)
3633{
3634        int err;
3635        int srqn = vhcr->in_modifier;
3636        struct res_srq *srq;
3637
3638        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3639        if (err)
3640                return err;
3641        if (srq->com.from_state != RES_SRQ_HW) {
3642                err = -EBUSY;
3643                goto out;
3644        }
3645        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3646out:
3647        put_res(dev, slave, srqn, RES_SRQ);
3648        return err;
3649}
3650
3651int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3652                         struct mlx4_vhcr *vhcr,
3653                         struct mlx4_cmd_mailbox *inbox,
3654                         struct mlx4_cmd_mailbox *outbox,
3655                         struct mlx4_cmd_info *cmd)
3656{
3657        int err;
3658        int srqn = vhcr->in_modifier;
3659        struct res_srq *srq;
3660
3661        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3662        if (err)
3663                return err;
3664
3665        if (srq->com.from_state != RES_SRQ_HW) {
3666                err = -EBUSY;
3667                goto out;
3668        }
3669
3670        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3671out:
3672        put_res(dev, slave, srqn, RES_SRQ);
3673        return err;
3674}
3675
3676int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3677                        struct mlx4_vhcr *vhcr,
3678                        struct mlx4_cmd_mailbox *inbox,
3679                        struct mlx4_cmd_mailbox *outbox,
3680                        struct mlx4_cmd_info *cmd)
3681{
3682        int err;
3683        int qpn = vhcr->in_modifier & 0x7fffff;
3684        struct res_qp *qp;
3685
3686        err = get_res(dev, slave, qpn, RES_QP, &qp);
3687        if (err)
3688                return err;
3689        if (qp->com.from_state != RES_QP_HW) {
3690                err = -EBUSY;
3691                goto out;
3692        }
3693
3694        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3695out:
3696        put_res(dev, slave, qpn, RES_QP);
3697        return err;
3698}
3699
3700int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3701                              struct mlx4_vhcr *vhcr,
3702                              struct mlx4_cmd_mailbox *inbox,
3703                              struct mlx4_cmd_mailbox *outbox,
3704                              struct mlx4_cmd_info *cmd)
3705{
3706        struct mlx4_qp_context *context = inbox->buf + 8;
3707        adjust_proxy_tun_qkey(dev, vhcr, context);
3708        update_pkey_index(dev, slave, inbox);
3709        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3710}
3711
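    /* Bit 6 of sched_queue selects the physical port.  Remap the port the
     * slave addressed (its logical port) to the physical port it actually
     * owns, for both the primary and, when present, the alternate path.
     */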
3712static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3713                                  struct mlx4_qp_context *qpc,
3714                                  struct mlx4_cmd_mailbox *inbox)
3715{
3716        enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3717        u8 pri_sched_queue;
3718        int port = mlx4_slave_convert_port(
3719                   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3720
3721        if (port < 0)
3722                return -EINVAL;
3723
3724        pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3725                          ((port & 1) << 6);
3726
3727        if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3728            qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3729                qpc->pri_path.sched_queue = pri_sched_queue;
3730        }
3731
3732        if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3733                port = mlx4_slave_convert_port(
3734                                dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3735                                + 1) - 1;
3736                if (port < 0)
3737                        return -EINVAL;
3738                qpc->alt_path.sched_queue =
3739                        (qpc->alt_path.sched_queue & ~(1 << 6)) |
3740                        (port & 1) << 6;
3741        }
3742        return 0;
3743}
3744
3745static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3746                                struct mlx4_qp_context *qpc,
3747                                struct mlx4_cmd_mailbox *inbox)
3748{
3749        u64 mac;
3750        int port;
3751        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3752        u8 sched = *(u8 *)(inbox->buf + 64);
3753        u8 smac_ix;
3754
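            /* For an Ethernet (RoCE) QP, the source-MAC index in the primary
             * path must refer to a MAC that is actually registered to this
             * slave on that port.
             */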
3755        port = (sched >> 6 & 1) + 1;
3756        if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3757                smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3758                if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3759                        return -ENOENT;
3760        }
3761        return 0;
3762}
3763
3764int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3765                             struct mlx4_vhcr *vhcr,
3766                             struct mlx4_cmd_mailbox *inbox,
3767                             struct mlx4_cmd_mailbox *outbox,
3768                             struct mlx4_cmd_info *cmd)
3769{
3770        int err;
3771        struct mlx4_qp_context *qpc = inbox->buf + 8;
3772        int qpn = vhcr->in_modifier & 0x7fffff;
3773        struct res_qp *qp;
3774        u8 orig_sched_queue;
3775        __be32  orig_param3 = qpc->param3;
3776        u8 orig_vlan_control = qpc->pri_path.vlan_control;
3777        u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3778        u8 orig_pri_path_fl = qpc->pri_path.fl;
3779        u8 orig_vlan_index = qpc->pri_path.vlan_index;
3780        u8 orig_feup = qpc->pri_path.feup;
3781
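            /* The orig_* fields above snapshot the values supplied by the VF
             * before update_vport_qp_param() may overwrite them (e.g. for VST);
             * they are recorded in the QP resource once the transition succeeds.
             */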
3782        err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3783        if (err)
3784                return err;
3785        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3786        if (err)
3787                return err;
3788
3789        if (roce_verify_mac(dev, slave, qpc, inbox))
3790                return -EINVAL;
3791
3792        update_pkey_index(dev, slave, inbox);
3793        update_gid(dev, inbox, (u8)slave);
3794        adjust_proxy_tun_qkey(dev, vhcr, qpc);
3795        orig_sched_queue = qpc->pri_path.sched_queue;
3796
3797        err = get_res(dev, slave, qpn, RES_QP, &qp);
3798        if (err)
3799                return err;
3800        if (qp->com.from_state != RES_QP_HW) {
3801                err = -EBUSY;
3802                goto out;
3803        }
3804
3805        err = update_vport_qp_param(dev, inbox, slave, qpn);
3806        if (err)
3807                goto out;
3808
3809        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3810out:
3811        /* if no error, save sched queue value passed in by VF. This is
3812         * essentially the QOS value provided by the VF. This will be useful
3813         * if we allow dynamic changes from VST back to VGT
3814         */
3815        if (!err) {
3816                qp->sched_queue = orig_sched_queue;
3817                qp->param3      = orig_param3;
3818                qp->vlan_control = orig_vlan_control;
3819                qp->fvl_rx      =  orig_fvl_rx;
3820                qp->pri_path_fl = orig_pri_path_fl;
3821                qp->vlan_index  = orig_vlan_index;
3822                qp->feup        = orig_feup;
3823        }
3824        put_res(dev, slave, qpn, RES_QP);
3825        return err;
3826}
3827
3828int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3829                            struct mlx4_vhcr *vhcr,
3830                            struct mlx4_cmd_mailbox *inbox,
3831                            struct mlx4_cmd_mailbox *outbox,
3832                            struct mlx4_cmd_info *cmd)
3833{
3834        int err;
3835        struct mlx4_qp_context *context = inbox->buf + 8;
3836
3837        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3838        if (err)
3839                return err;
3840        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3841        if (err)
3842                return err;
3843
3844        update_pkey_index(dev, slave, inbox);
3845        update_gid(dev, inbox, (u8)slave);
3846        adjust_proxy_tun_qkey(dev, vhcr, context);
3847        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3848}
3849
3850int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3851                            struct mlx4_vhcr *vhcr,
3852                            struct mlx4_cmd_mailbox *inbox,
3853                            struct mlx4_cmd_mailbox *outbox,
3854                            struct mlx4_cmd_info *cmd)
3855{
3856        int err;
3857        struct mlx4_qp_context *context = inbox->buf + 8;
3858
3859        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3860        if (err)
3861                return err;
3862        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3863        if (err)
3864                return err;
3865
3866        update_pkey_index(dev, slave, inbox);
3867        update_gid(dev, inbox, (u8)slave);
3868        adjust_proxy_tun_qkey(dev, vhcr, context);
3869        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3870}
3871
3872
3873int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3874                              struct mlx4_vhcr *vhcr,
3875                              struct mlx4_cmd_mailbox *inbox,
3876                              struct mlx4_cmd_mailbox *outbox,
3877                              struct mlx4_cmd_info *cmd)
3878{
3879        struct mlx4_qp_context *context = inbox->buf + 8;
3880        int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3881        if (err)
3882                return err;
3883        adjust_proxy_tun_qkey(dev, vhcr, context);
3884        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3885}
3886
3887int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3888                            struct mlx4_vhcr *vhcr,
3889                            struct mlx4_cmd_mailbox *inbox,
3890                            struct mlx4_cmd_mailbox *outbox,
3891                            struct mlx4_cmd_info *cmd)
3892{
3893        int err;
3894        struct mlx4_qp_context *context = inbox->buf + 8;
3895
3896        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3897        if (err)
3898                return err;
3899        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3900        if (err)
3901                return err;
3902
3903        adjust_proxy_tun_qkey(dev, vhcr, context);
3904        update_gid(dev, inbox, (u8)slave);
3905        update_pkey_index(dev, slave, inbox);
3906        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3907}
3908
3909int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3910                            struct mlx4_vhcr *vhcr,
3911                            struct mlx4_cmd_mailbox *inbox,
3912                            struct mlx4_cmd_mailbox *outbox,
3913                            struct mlx4_cmd_info *cmd)
3914{
3915        int err;
3916        struct mlx4_qp_context *context = inbox->buf + 8;
3917
3918        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3919        if (err)
3920                return err;
3921        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3922        if (err)
3923                return err;
3924
3925        adjust_proxy_tun_qkey(dev, vhcr, context);
3926        update_gid(dev, inbox, (u8)slave);
3927        update_pkey_index(dev, slave, inbox);
3928        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3929}
3930
3931int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3932                         struct mlx4_vhcr *vhcr,
3933                         struct mlx4_cmd_mailbox *inbox,
3934                         struct mlx4_cmd_mailbox *outbox,
3935                         struct mlx4_cmd_info *cmd)
3936{
3937        int err;
3938        int qpn = vhcr->in_modifier & 0x7fffff;
3939        struct res_qp *qp;
3940
3941        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3942        if (err)
3943                return err;
3944        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3945        if (err)
3946                goto ex_abort;
3947
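            /* The QP has left the HW-owned state: drop the references taken on
             * its MTT, CQs and SRQ when it was moved to RES_QP_HW.
             */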
3948        atomic_dec(&qp->mtt->ref_count);
3949        atomic_dec(&qp->rcq->ref_count);
3950        atomic_dec(&qp->scq->ref_count);
3951        if (qp->srq)
3952                atomic_dec(&qp->srq->ref_count);
3953        res_end_move(dev, slave, RES_QP, qpn);
3954        return 0;
3955
3956ex_abort:
3957        res_abort_move(dev, slave, RES_QP, qpn);
3958
3959        return err;
3960}
3961
3962static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3963                                struct res_qp *rqp, u8 *gid)
3964{
3965        struct res_gid *res;
3966
3967        list_for_each_entry(res, &rqp->mcg_list, list) {
3968                if (!memcmp(res->gid, gid, 16))
3969                        return res;
3970        }
3971        return NULL;
3972}
3973
3974static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3975                       u8 *gid, enum mlx4_protocol prot,
3976                       enum mlx4_steer_type steer, u64 reg_id)
3977{
3978        struct res_gid *res;
3979        int err;
3980
3981        res = kzalloc(sizeof(*res), GFP_KERNEL);
3982        if (!res)
3983                return -ENOMEM;
3984
3985        spin_lock_irq(&rqp->mcg_spl);
3986        if (find_gid(dev, slave, rqp, gid)) {
3987                kfree(res);
3988                err = -EEXIST;
3989        } else {
3990                memcpy(res->gid, gid, 16);
3991                res->prot = prot;
3992                res->steer = steer;
3993                res->reg_id = reg_id;
3994                list_add_tail(&res->list, &rqp->mcg_list);
3995                err = 0;
3996        }
3997        spin_unlock_irq(&rqp->mcg_spl);
3998
3999        return err;
4000}
4001
4002static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4003                       u8 *gid, enum mlx4_protocol prot,
4004                       enum mlx4_steer_type steer, u64 *reg_id)
4005{
4006        struct res_gid *res;
4007        int err;
4008
4009        spin_lock_irq(&rqp->mcg_spl);
4010        res = find_gid(dev, slave, rqp, gid);
4011        if (!res || res->prot != prot || res->steer != steer)
4012                err = -EINVAL;
4013        else {
4014                *reg_id = res->reg_id;
4015                list_del(&res->list);
4016                kfree(res);
4017                err = 0;
4018        }
4019        spin_unlock_irq(&rqp->mcg_spl);
4020
4021        return err;
4022}
4023
4024static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4025                     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4026                     enum mlx4_steer_type type, u64 *reg_id)
4027{
4028        switch (dev->caps.steering_mode) {
4029        case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4030                int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4031                if (port < 0)
4032                        return port;
4033                return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4034                                                block_loopback, prot,
4035                                                reg_id);
4036        }
4037        case MLX4_STEERING_MODE_B0:
4038                if (prot == MLX4_PROT_ETH) {
4039                        int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4040                        if (port < 0)
4041                                return port;
4042                        gid[5] = port;
4043                }
4044                return mlx4_qp_attach_common(dev, qp, gid,
4045                                            block_loopback, prot, type);
4046        default:
4047                return -EINVAL;
4048        }
4049}
4050
4051static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4052                     u8 gid[16], enum mlx4_protocol prot,
4053                     enum mlx4_steer_type type, u64 reg_id)
4054{
4055        switch (dev->caps.steering_mode) {
4056        case MLX4_STEERING_MODE_DEVICE_MANAGED:
4057                return mlx4_flow_detach(dev, reg_id);
4058        case MLX4_STEERING_MODE_B0:
4059                return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4060        default:
4061                return -EINVAL;
4062        }
4063}
4064
4065static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4066                            u8 *gid, enum mlx4_protocol prot)
4067{
4068        int real_port;
4069
4070        if (prot != MLX4_PROT_ETH)
4071                return 0;
4072
4073        if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4074            dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4075                real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4076                if (real_port < 0)
4077                        return -EINVAL;
4078                gid[5] = real_port;
4079        }
4080
4081        return 0;
4082}
4083
4084int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4085                               struct mlx4_vhcr *vhcr,
4086                               struct mlx4_cmd_mailbox *inbox,
4087                               struct mlx4_cmd_mailbox *outbox,
4088                               struct mlx4_cmd_info *cmd)
4089{
4090        struct mlx4_qp qp; /* dummy for calling attach/detach */
4091        u8 *gid = inbox->buf;
4092        enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4093        int err;
4094        int qpn;
4095        struct res_qp *rqp;
4096        u64 reg_id = 0;
4097        int attach = vhcr->op_modifier;
4098        int block_loopback = vhcr->in_modifier >> 31;
4099        u8 steer_type_mask = 2;
4100        enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4101
4102        qpn = vhcr->in_modifier & 0xffffff;
4103        err = get_res(dev, slave, qpn, RES_QP, &rqp);
4104        if (err)
4105                return err;
4106
4107        qp.qpn = qpn;
4108        if (attach) {
4109                err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4110                                type, &reg_id);
4111                if (err) {
4112                        pr_err("Failed to attach rule to qp 0x%x\n", qpn);
4113                        goto ex_put;
4114                }
4115                err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4116                if (err)
4117                        goto ex_detach;
4118        } else {
4119                err = mlx4_adjust_port(dev, slave, gid, prot);
4120                if (err)
4121                        goto ex_put;
4122
4123                err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4124                if (err)
4125                        goto ex_put;
4126
4127                err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4128                if (err)
4129                        pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
4130                               qpn, reg_id);
4131        }
4132        put_res(dev, slave, qpn, RES_QP);
4133        return err;
4134
4135ex_detach:
4136        qp_detach(dev, &qp, gid, prot, type, reg_id);
4137ex_put:
4138        put_res(dev, slave, qpn, RES_QP);
4139        return err;
4140}
4141
4142/*
4143 * MAC validation for Flow Steering rules.
4144 * A VF can attach rules only with a MAC address that is assigned to it.
4145 */
4146static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4147                                   struct list_head *rlist)
4148{
4149        struct mac_res *res, *tmp;
4150        __be64 be_mac;
4151
4152        /* make sure it isn't a multicast or broadcast MAC */
4153        if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4154            !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4155                list_for_each_entry_safe(res, tmp, rlist, list) {
4156                        be_mac = cpu_to_be64(res->mac << 16);
4157                        if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4158                                return 0;
4159                }
4160                pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4161                       eth_header->eth.dst_mac, slave);
4162                return -EINVAL;
4163        }
4164        return 0;
4165}
4166
4167static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4168                                         struct _rule_hw *eth_header)
4169{
4170        if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4171            is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4172                struct mlx4_net_trans_rule_hw_eth *eth =
4173                        (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4174                struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4175                bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4176                        next_rule->rsvd == 0;
4177
4178                if (last_rule)
4179                        ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4180        }
4181}
4182
4183/*
4184 * If the eth header is missing, append an eth header with a MAC address
4185 * assigned to the VF.
4186 */
4187static int add_eth_header(struct mlx4_dev *dev, int slave,
4188                          struct mlx4_cmd_mailbox *inbox,
4189                          struct list_head *rlist, int header_id)
4190{
4191        struct mac_res *res, *tmp;
4192        u8 port;
4193        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4194        struct mlx4_net_trans_rule_hw_eth *eth_header;
4195        struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4196        struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4197        __be64 be_mac = 0;
4198        __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4199
4200        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4201        port = ctrl->port;
4202        eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4203
4204        /* Clear a space in the inbox for eth header */
4205        switch (header_id) {
4206        case MLX4_NET_TRANS_RULE_ID_IPV4:
4207                ip_header =
4208                        (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4209                memmove(ip_header, eth_header,
4210                        sizeof(*ip_header) + sizeof(*l4_header));
4211                break;
4212        case MLX4_NET_TRANS_RULE_ID_TCP:
4213        case MLX4_NET_TRANS_RULE_ID_UDP:
4214                l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4215                            (eth_header + 1);
4216                memmove(l4_header, eth_header, sizeof(*l4_header));
4217                break;
4218        default:
4219                return -EINVAL;
4220        }
4221        list_for_each_entry_safe(res, tmp, rlist, list) {
4222                if (port == res->port) {
4223                        be_mac = cpu_to_be64(res->mac << 16);
4224                        break;
4225                }
4226        }
4227        if (!be_mac) {
4228                pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
4229                       port);
4230                return -EINVAL;
4231        }
4232
4233        memset(eth_header, 0, sizeof(*eth_header));
4234        eth_header->size = sizeof(*eth_header) >> 2;
4235        eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4236        memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4237        memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4238
4239        return 0;
4240
4241}
4242
4243#define MLX4_UPD_QP_PATH_MASK_SUPPORTED      (                                \
4244        1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX                     |\
4245        1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4246int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4247                           struct mlx4_vhcr *vhcr,
4248                           struct mlx4_cmd_mailbox *inbox,
4249                           struct mlx4_cmd_mailbox *outbox,
4250                           struct mlx4_cmd_info *cmd_info)
4251{
4252        int err;
4253        u32 qpn = vhcr->in_modifier & 0xffffff;
4254        struct res_qp *rqp;
4255        u64 mac;
4256        unsigned port;
4257        u64 pri_addr_path_mask;
4258        struct mlx4_update_qp_context *cmd;
4259        int smac_index;
4260
4261        cmd = (struct mlx4_update_qp_context *)inbox->buf;
4262
4263        pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4264        if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4265            (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4266                return -EPERM;
4267
4268        if ((pri_addr_path_mask &
4269             (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4270                !(dev->caps.flags2 &
4271                  MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4272                mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4273                          slave);
4274                return -ENOTSUPP;
4275        }
4276
4277        /* Just change the smac for the QP */
4278        err = get_res(dev, slave, qpn, RES_QP, &rqp);
4279        if (err) {
4280                mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4281                return err;
4282        }
4283
4284        port = (rqp->sched_queue >> 6 & 1) + 1;
4285
4286        if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4287                smac_index = cmd->qp_context.pri_path.grh_mylmc;
4288                err = mac_find_smac_ix_in_slave(dev, slave, port,
4289                                                smac_index, &mac);
4290
4291                if (err) {
4292                        mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4293                                 qpn, smac_index);
4294                        goto err_mac;
4295                }
4296        }
4297
4298        err = mlx4_cmd(dev, inbox->dma,
4299                       vhcr->in_modifier, 0,
4300                       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4301                       MLX4_CMD_NATIVE);
4302        if (err) {
4303                mlx4_err(dev, "Failed to update QP 0x%x, command failed\n", qpn);
4304                goto err_mac;
4305        }
4306
4307err_mac:
4308        put_res(dev, slave, qpn, RES_QP);
4309        return err;
4310}
4311
4312static u32 qp_attach_mbox_size(void *mbox)
4313{
4314        u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4315        struct _rule_hw  *rule_header;
4316
4317        rule_header = (struct _rule_hw *)(mbox + size);
4318
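            /* Every rule header stores its own size in 32-bit words; a header
             * with size 0 terminates the list.
             */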
4319        while (rule_header->size) {
4320                size += rule_header->size * sizeof(u32);
4321                rule_header += 1;
4322        }
4323        return size;
4324}
4325
4326static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4327
4328int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4329                                         struct mlx4_vhcr *vhcr,
4330                                         struct mlx4_cmd_mailbox *inbox,
4331                                         struct mlx4_cmd_mailbox *outbox,
4332                                         struct mlx4_cmd_info *cmd)
4333{
4334
4335        struct mlx4_priv *priv = mlx4_priv(dev);
4336        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4337        struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4338        int err;
4339        int qpn;
4340        struct res_qp *rqp;
4341        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4342        struct _rule_hw  *rule_header;
4343        int header_id;
4344        struct res_fs_rule *rrule;
4345        u32 mbox_size;
4346
4347        if (dev->caps.steering_mode !=
4348            MLX4_STEERING_MODE_DEVICE_MANAGED)
4349                return -EOPNOTSUPP;
4350
4351        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4352        err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4353        if (err <= 0)
4354                return -EINVAL;
4355        ctrl->port = err;
4356        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4357        err = get_res(dev, slave, qpn, RES_QP, &rqp);
4358        if (err) {
4359                pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4360                return err;
4361        }
4362        rule_header = (struct _rule_hw *)(ctrl + 1);
4363        header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4364
4365        if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4366                handle_eth_header_mcast_prio(ctrl, rule_header);
4367
4368        if (slave == dev->caps.function)
4369                goto execute;
4370
4371        switch (header_id) {
4372        case MLX4_NET_TRANS_RULE_ID_ETH:
4373                if (validate_eth_header_mac(slave, rule_header, rlist)) {
4374                        err = -EINVAL;
4375                        goto err_put_qp;
4376                }
4377                break;
4378        case MLX4_NET_TRANS_RULE_ID_IB:
4379                break;
4380        case MLX4_NET_TRANS_RULE_ID_IPV4:
4381        case MLX4_NET_TRANS_RULE_ID_TCP:
4382        case MLX4_NET_TRANS_RULE_ID_UDP:
4383                pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4384                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4385                        err = -EINVAL;
4386                        goto err_put_qp;
4387                }
4388                vhcr->in_modifier +=
4389                        sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4390                break;
4391        default:
4392                pr_err("Corrupted mailbox\n");
4393                err = -EINVAL;
4394                goto err_put_qp;
4395        }
4396
4397execute:
4398        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4399                           vhcr->in_modifier, 0,
4400                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4401                           MLX4_CMD_NATIVE);
4402        if (err)
4403                goto err_put_qp;
4404
4405
4406        err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4407        if (err) {
4408                mlx4_err(dev, "Failed to add flow steering resources\n");
4409                goto err_detach;
4410        }
4411
4412        err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4413        if (err)
4414                goto err_detach;
4415
4416        mbox_size = qp_attach_mbox_size(inbox->buf);
4417        rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4418        if (!rrule->mirr_mbox) {
4419                err = -ENOMEM;
4420                goto err_put_rule;
4421        }
4422        rrule->mirr_mbox_size = mbox_size;
4423        rrule->mirr_rule_id = 0;
4424        memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4425
4426        /* set different port */
4427        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4428        if (ctrl->port == 1)
4429                ctrl->port = 2;
4430        else
4431                ctrl->port = 1;
4432
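            /* mirr_mbox keeps a copy of the rule aimed at the other port; on a
             * bonded device the mirror rule is installed right away.
             */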
4433        if (mlx4_is_bonded(dev))
4434                mlx4_do_mirror_rule(dev, rrule);
4435
4436        atomic_inc(&rqp->ref_count);
4437
4438err_put_rule:
4439        put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4440err_detach:
4441        /* detach rule on error */
4442        if (err)
4443                mlx4_cmd(dev, vhcr->out_param, 0, 0,
4444                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4445                         MLX4_CMD_NATIVE);
4446err_put_qp:
4447        put_res(dev, slave, qpn, RES_QP);
4448        return err;
4449}
4450
4451static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4452{
4453        int err;
4454
4455        err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4456        if (err) {
4457                mlx4_err(dev, "Failed to remove flow steering resources\n");
4458                return err;
4459        }
4460
4461        mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4462                 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4463        return 0;
4464}
4465
4466int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4467                                         struct mlx4_vhcr *vhcr,
4468                                         struct mlx4_cmd_mailbox *inbox,
4469                                         struct mlx4_cmd_mailbox *outbox,
4470                                         struct mlx4_cmd_info *cmd)
4471{
4472        int err;
4473        struct res_qp *rqp;
4474        struct res_fs_rule *rrule;
4475        u64 mirr_reg_id;
4476
4477        if (dev->caps.steering_mode !=
4478            MLX4_STEERING_MODE_DEVICE_MANAGED)
4479                return -EOPNOTSUPP;
4480
4481        err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4482        if (err)
4483                return err;
4484
4485        if (!rrule->mirr_mbox) {
4486                mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4487                put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4488                return -EINVAL;
4489        }
4490        mirr_reg_id = rrule->mirr_rule_id;
4491        kfree(rrule->mirr_mbox);
4492
4493        /* Release the rule from busy state before removal */
4494        put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4495        err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4496        if (err)
4497                return err;
4498
4499        if (mirr_reg_id && mlx4_is_bonded(dev)) {
4500                err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4501                if (err) {
4502                        mlx4_err(dev, "Failed to get resource of mirror rule\n");
4503                } else {
4504                        put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4505                        mlx4_undo_mirror_rule(dev, rrule);
4506                }
4507        }
4508        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4509        if (err) {
4510                mlx4_err(dev, "Failed to remove flow steering resources\n");
4511                goto out;
4512        }
4513
4514        err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4515                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4516                       MLX4_CMD_NATIVE);
4517        if (!err)
4518                atomic_dec(&rqp->ref_count);
4519out:
4520        put_res(dev, slave, rrule->qpn, RES_QP);
4521        return err;
4522}
4523
4524enum {
4525        BUSY_MAX_RETRIES = 10
4526};
4527
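    /*
     * QUERY_IF_STAT wrapper: hold the counter index busy in the tracker for
     * the duration of the DMA'd query so it cannot be freed underneath it.
     */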
4528int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4529                               struct mlx4_vhcr *vhcr,
4530                               struct mlx4_cmd_mailbox *inbox,
4531                               struct mlx4_cmd_mailbox *outbox,
4532                               struct mlx4_cmd_info *cmd)
4533{
4534        int err;
4535        int index = vhcr->in_modifier & 0xffff;
4536
4537        err = get_res(dev, slave, index, RES_COUNTER, NULL);
4538        if (err)
4539                return err;
4540
4541        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4542        put_res(dev, slave, index, RES_COUNTER);
4543        return err;
4544}
4545
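    /*
     * Drop every multicast attachment still registered on a QP that is being
     * cleaned up for a slave.  The switch mirrors the attach paths:
     * device-managed rules are detached by reg_id, B0-mode attachments go
     * through mlx4_qp_detach_common() using a throw-away struct mlx4_qp.
     */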
4546static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4547{
4548        struct res_gid *rgid;
4549        struct res_gid *tmp;
4550        struct mlx4_qp qp; /* dummy for calling attach/detach */
4551
4552        list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4553                switch (dev->caps.steering_mode) {
4554                case MLX4_STEERING_MODE_DEVICE_MANAGED:
4555                        mlx4_flow_detach(dev, rgid->reg_id);
4556                        break;
4557                case MLX4_STEERING_MODE_B0:
4558                        qp.qpn = rqp->local_qpn;
4559                        (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4560                                                     rgid->prot, rgid->steer);
4561                        break;
4562                }
4563                list_del(&rgid->list);
4564                kfree(rgid);
4565        }
4566}
4567
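    /*
     * One pass over a slave's resource list of the given type: every entry
     * that is not already busy is marked RES_ANY_BUSY and flagged as
     * "removing" so the cleanup code owns it.  Returns how many entries were
     * found busy elsewhere and could not be claimed in this pass.
     */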
4568static int _move_all_busy(struct mlx4_dev *dev, int slave,
4569                          enum mlx4_resource type, int print)
4570{
4571        struct mlx4_priv *priv = mlx4_priv(dev);
4572        struct mlx4_resource_tracker *tracker =
4573                &priv->mfunc.master.res_tracker;
4574        struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4575        struct res_common *r;
4576        struct res_common *tmp;
4577        int busy;
4578
4579        busy = 0;
4580        spin_lock_irq(mlx4_tlock(dev));
4581        list_for_each_entry_safe(r, tmp, rlist, list) {
4582                if (r->owner == slave) {
4583                        if (!r->removing) {
4584                                if (r->state == RES_ANY_BUSY) {
4585                                        if (print)
4586                                                mlx4_dbg(dev,
4587                                                         "%s id 0x%llx is busy\n",
4588                                                          resource_str(type),
4589                                                          r->res_id);
4590                                        ++busy;
4591                                } else {
4592                                        r->from_state = r->state;
4593                                        r->state = RES_ANY_BUSY;
4594                                        r->removing = 1;
4595                                }
4596                        }
4597                }
4598        }
4599        spin_unlock_irq(mlx4_tlock(dev));
4600
4601        return busy;
4602}
4603
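    /*
     * Retry _move_all_busy() for up to about five seconds, rescheduling
     * between passes, until every resource of @type owned by @slave has been
     * claimed.  A final pass with printing enabled reports whatever is still
     * busy when the timeout expires; the count of such entries is returned.
     */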
4604static int move_all_busy(struct mlx4_dev *dev, int slave,
4605                         enum mlx4_resource type)
4606{
4607        unsigned long begin;
4608        int busy;
4609
4610        begin = jiffies;
4611        do {
4612                busy = _move_all_busy(dev, slave, type, 0);
4613                if (time_after(jiffies, begin + 5 * HZ))
4614                        break;
4615                if (busy)
4616                        cond_resched();
4617        } while (busy);
4618
4619        if (busy)
4620                busy = _move_all_busy(dev, slave, type, 1);
4621
4622        return busy;
4623}
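
    /*
     * Release every QP still tracked for @slave; the rem_slave_*() helpers
     * below all follow the same pattern.  Each QP is unwound through its
     * recorded state: RES_QP_HW is moved back to reset via MLX4_CMD_2RST_QP
     * (dropping the CQ/MTT/SRQ references it held), RES_QP_MAPPED frees its
     * ICM, and RES_QP_RESERVED returns the QP number and quota before the
     * tracker entry itself is freed.
     */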
4624static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4625{
4626        struct mlx4_priv *priv = mlx4_priv(dev);
4627        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4628        struct list_head *qp_list =
4629                &tracker->slave_list[slave].res_list[RES_QP];
4630        struct res_qp *qp;
4631        struct res_qp *tmp;
4632        int state;
4633        u64 in_param;
4634        int qpn;
4635        int err;
4636
4637        err = move_all_busy(dev, slave, RES_QP);
4638        if (err)
4639                mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4640                          slave);
4641
4642        spin_lock_irq(mlx4_tlock(dev));
4643        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4644                spin_unlock_irq(mlx4_tlock(dev));
4645                if (qp->com.owner == slave) {
4646                        qpn = qp->com.res_id;
4647                        detach_qp(dev, slave, qp);
4648                        state = qp->com.from_state;
4649                        while (state != 0) {
4650                                switch (state) {
4651                                case RES_QP_RESERVED:
4652                                        spin_lock_irq(mlx4_tlock(dev));
4653                                        rb_erase(&qp->com.node,
4654                                                 &tracker->res_tree[RES_QP]);
4655                                        list_del(&qp->com.list);
4656                                        spin_unlock_irq(mlx4_tlock(dev));
4657                                        if (!valid_reserved(dev, slave, qpn)) {
4658                                                __mlx4_qp_release_range(dev, qpn, 1);
4659                                                mlx4_release_resource(dev, slave,
4660                                                                      RES_QP, 1, 0);
4661                                        }
4662                                        kfree(qp);
4663                                        state = 0;
4664                                        break;
4665                                case RES_QP_MAPPED:
4666                                        if (!valid_reserved(dev, slave, qpn))
4667                                                __mlx4_qp_free_icm(dev, qpn);
4668                                        state = RES_QP_RESERVED;
4669                                        break;
4670                                case RES_QP_HW:
4671                                        in_param = slave;
4672                                        err = mlx4_cmd(dev, in_param,
4673                                                       qp->local_qpn, 2,
4674                                                       MLX4_CMD_2RST_QP,
4675                                                       MLX4_CMD_TIME_CLASS_A,
4676                                                       MLX4_CMD_NATIVE);
4677                                        if (err)
4678                                                mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4679                                                         slave, qp->local_qpn);
4680                                        atomic_dec(&qp->rcq->ref_count);
4681                                        atomic_dec(&qp->scq->ref_count);
4682                                        atomic_dec(&qp->mtt->ref_count);
4683                                        if (qp->srq)
4684                                                atomic_dec(&qp->srq->ref_count);
4685                                        state = RES_QP_MAPPED;
4686                                        break;
4687                                default:
4688                                        state = 0;
4689                                }
4690                        }
4691                }
4692                spin_lock_irq(mlx4_tlock(dev));
4693        }
4694        spin_unlock_irq(mlx4_tlock(dev));
4695}
4696
4697static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4698{
4699        struct mlx4_priv *priv = mlx4_priv(dev);
4700        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4701        struct list_head *srq_list =
4702                &tracker->slave_list[slave].res_list[RES_SRQ];
4703        struct res_srq *srq;
4704        struct res_srq *tmp;
4705        int state;
4706        u64 in_param;
4707        LIST_HEAD(tlist);
4708        int srqn;
4709        int err;
4710
4711        err = move_all_busy(dev, slave, RES_SRQ);
4712        if (err)
4713                mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4714                          slave);
4715
4716        spin_lock_irq(mlx4_tlock(dev));
4717        list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4718                spin_unlock_irq(mlx4_tlock(dev));
4719                if (srq->com.owner == slave) {
4720                        srqn = srq->com.res_id;
4721                        state = srq->com.from_state;
4722                        while (state != 0) {
4723                                switch (state) {
4724                                case RES_SRQ_ALLOCATED:
4725                                        __mlx4_srq_free_icm(dev, srqn);
4726                                        spin_lock_irq(mlx4_tlock(dev));
4727                                        rb_erase(&srq->com.node,
4728                                                 &tracker->res_tree[RES_SRQ]);
4729                                        list_del(&srq->com.list);
4730                                        spin_unlock_irq(mlx4_tlock(dev));
4731                                        mlx4_release_resource(dev, slave,
4732                                                              RES_SRQ, 1, 0);
4733                                        kfree(srq);
4734                                        state = 0;
4735                                        break;
4736
4737                                case RES_SRQ_HW:
4738                                        in_param = slave;
4739                                        err = mlx4_cmd(dev, in_param, srqn, 1,
4740                                                       MLX4_CMD_HW2SW_SRQ,
4741                                                       MLX4_CMD_TIME_CLASS_A,
4742                                                       MLX4_CMD_NATIVE);
4743                                        if (err)
4744                                                mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4745                                                         slave, srqn);
4746
4747                                        atomic_dec(&srq->mtt->ref_count);
4748                                        if (srq->cq)
4749                                                atomic_dec(&srq->cq->ref_count);
4750                                        state = RES_SRQ_ALLOCATED;
4751                                        break;
4752
4753                                default:
4754                                        state = 0;
4755                                }
4756                        }
4757                }
4758                spin_lock_irq(mlx4_tlock(dev));
4759        }
4760        spin_unlock_irq(mlx4_tlock(dev));
4761}
4762
4763static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4764{
4765        struct mlx4_priv *priv = mlx4_priv(dev);
4766        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4767        struct list_head *cq_list =
4768                &tracker->slave_list[slave].res_list[RES_CQ];
4769        struct res_cq *cq;
4770        struct res_cq *tmp;
4771        int state;
4772        u64 in_param;
4773        LIST_HEAD(tlist);
4774        int cqn;
4775        int err;
4776
4777        err = move_all_busy(dev, slave, RES_CQ);
4778        if (err)
4779                mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4780                          slave);
4781
4782        spin_lock_irq(mlx4_tlock(dev));
4783        list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4784                spin_unlock_irq(mlx4_tlock(dev));
4785                if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4786                        cqn = cq->com.res_id;
4787                        state = cq->com.from_state;
4788                        while (state != 0) {
4789                                switch (state) {
4790                                case RES_CQ_ALLOCATED:
4791                                        __mlx4_cq_free_icm(dev, cqn);
4792                                        spin_lock_irq(mlx4_tlock(dev));
4793                                        rb_erase(&cq->com.node,
4794                                                 &tracker->res_tree[RES_CQ]);
4795                                        list_del(&cq->com.list);
4796                                        spin_unlock_irq(mlx4_tlock(dev));
4797                                        mlx4_release_resource(dev, slave,
4798                                                              RES_CQ, 1, 0);
4799                                        kfree(cq);
4800                                        state = 0;
4801                                        break;
4802
4803                                case RES_CQ_HW:
4804                                        in_param = slave;
4805                                        err = mlx4_cmd(dev, in_param, cqn, 1,
4806                                                       MLX4_CMD_HW2SW_CQ,
4807                                                       MLX4_CMD_TIME_CLASS_A,
4808                                                       MLX4_CMD_NATIVE);
4809                                        if (err)
4810                                                mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4811                                                         slave, cqn);
4812                                        atomic_dec(&cq->mtt->ref_count);
4813                                        state = RES_CQ_ALLOCATED;
4814                                        break;
4815
4816                                default:
4817                                        state = 0;
4818                                }
4819                        }
4820                }
4821                spin_lock_irq(mlx4_tlock(dev));
4822        }
4823        spin_unlock_irq(mlx4_tlock(dev));
4824}
4825
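    /*
     * Unwind a slave's memory regions: RES_MPT_HW issues HW2SW_MPT and drops
     * the MTT reference, RES_MPT_MAPPED frees the MPT ICM, and
     * RES_MPT_RESERVED releases the key, the quota and the tracker entry.
     */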
4826static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4827{
4828        struct mlx4_priv *priv = mlx4_priv(dev);
4829        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4830        struct list_head *mpt_list =
4831                &tracker->slave_list[slave].res_list[RES_MPT];
4832        struct res_mpt *mpt;
4833        struct res_mpt *tmp;
4834        int state;
4835        u64 in_param;
4836        LIST_HEAD(tlist);
4837        int mptn;
4838        int err;
4839
4840        err = move_all_busy(dev, slave, RES_MPT);
4841        if (err)
4842                mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4843                          slave);
4844
4845        spin_lock_irq(mlx4_tlock(dev));
4846        list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4847                spin_unlock_irq(mlx4_tlock(dev));
4848                if (mpt->com.owner == slave) {
4849                        mptn = mpt->com.res_id;
4850                        state = mpt->com.from_state;
4851                        while (state != 0) {
4852                                switch (state) {
4853                                case RES_MPT_RESERVED:
4854                                        __mlx4_mpt_release(dev, mpt->key);
4855                                        spin_lock_irq(mlx4_tlock(dev));
4856                                        rb_erase(&mpt->com.node,
4857                                                 &tracker->res_tree[RES_MPT]);
4858                                        list_del(&mpt->com.list);
4859                                        spin_unlock_irq(mlx4_tlock(dev));
4860                                        mlx4_release_resource(dev, slave,
4861                                                              RES_MPT, 1, 0);
4862                                        kfree(mpt);
4863                                        state = 0;
4864                                        break;
4865
4866                                case RES_MPT_MAPPED:
4867                                        __mlx4_mpt_free_icm(dev, mpt->key);
4868                                        state = RES_MPT_RESERVED;
4869                                        break;
4870
4871                                case RES_MPT_HW:
4872                                        in_param = slave;
4873                                        err = mlx4_cmd(dev, in_param, mptn, 0,
4874                                                     MLX4_CMD_HW2SW_MPT,
4875                                                     MLX4_CMD_TIME_CLASS_A,
4876                                                     MLX4_CMD_NATIVE);
4877                                        if (err)
4878                                                mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4879                                                         slave, mptn);
4880                                        if (mpt->mtt)
4881                                                atomic_dec(&mpt->mtt->ref_count);
4882                                        state = RES_MPT_MAPPED;
4883                                        break;
4884                                default:
4885                                        state = 0;
4886                                }
4887                        }
4888                }
4889                spin_lock_irq(mlx4_tlock(dev));
4890        }
4891        spin_unlock_irq(mlx4_tlock(dev));
4892}
4893
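    /*
     * MTT ranges have a single tracked state, so cleanup is just freeing the
     * range (1 << order entries) and returning it to the slave's quota.
     */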
4894static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4895{
4896        struct mlx4_priv *priv = mlx4_priv(dev);
4897        struct mlx4_resource_tracker *tracker =
4898                &priv->mfunc.master.res_tracker;
4899        struct list_head *mtt_list =
4900                &tracker->slave_list[slave].res_list[RES_MTT];
4901        struct res_mtt *mtt;
4902        struct res_mtt *tmp;
4903        int state;
4904        LIST_HEAD(tlist);
4905        int base;
4906        int err;
4907
4908        err = move_all_busy(dev, slave, RES_MTT);
4909        if (err)
4910                mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4911                          slave);
4912
4913        spin_lock_irq(mlx4_tlock(dev));
4914        list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4915                spin_unlock_irq(mlx4_tlock(dev));
4916                if (mtt->com.owner == slave) {
4917                        base = mtt->com.res_id;
4918                        state = mtt->com.from_state;
4919                        while (state != 0) {
4920                                switch (state) {
4921                                case RES_MTT_ALLOCATED:
4922                                        __mlx4_free_mtt_range(dev, base,
4923                                                              mtt->order);
4924                                        spin_lock_irq(mlx4_tlock(dev));
4925                                        rb_erase(&mtt->com.node,
4926                                                 &tracker->res_tree[RES_MTT]);
4927                                        list_del(&mtt->com.list);
4928                                        spin_unlock_irq(mlx4_tlock(dev));
4929                                        mlx4_release_resource(dev, slave, RES_MTT,
4930                                                              1 << mtt->order, 0);
4931                                        kfree(mtt);
4932                                        state = 0;
4933                                        break;
4934
4935                                default:
4936                                        state = 0;
4937                                }
4938                        }
4939                }
4940                spin_lock_irq(mlx4_tlock(dev));
4941        }
4942        spin_unlock_irq(mlx4_tlock(dev));
4943}
4944
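    /*
     * Install the mirror copy of a device-managed flow-steering rule, using
     * the attach mailbox saved on the original rule (the port field in that
     * copy was already flipped at attach time).  On success the new rule is
     * registered in the tracker under the same owner and QP, its id is
     * stored in mirr_rule_id, and the mirror itself carries no mailbox so it
     * can only be removed through the original rule.
     */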
4945static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4946{
4947        struct mlx4_cmd_mailbox *mailbox;
4948        int err;
4949        struct res_fs_rule *mirr_rule;
4950        u64 reg_id;
4951
4952        mailbox = mlx4_alloc_cmd_mailbox(dev);
4953        if (IS_ERR(mailbox))
4954                return PTR_ERR(mailbox);
4955
4956        if (!fs_rule->mirr_mbox) {
4957                mlx4_err(dev, "rule mirroring mailbox is null\n");
                    mlx4_free_cmd_mailbox(dev, mailbox);
4958                return -EINVAL;
4959        }
4960        memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4961        err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4962                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4963                           MLX4_CMD_NATIVE);
4964        mlx4_free_cmd_mailbox(dev, mailbox);
4965
4966        if (err)
4967                goto err;
4968
4969        err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4970        if (err)
4971                goto err_detach;
4972
4973        err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
4974        if (err)
4975                goto err_rem;
4976
4977        fs_rule->mirr_rule_id = reg_id;
4978        mirr_rule->mirr_rule_id = 0;
4979        mirr_rule->mirr_mbox_size = 0;
4980        mirr_rule->mirr_mbox = NULL;
4981        put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
4982
4983        return 0;
4984err_rem:
4985        rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
4986err_detach:
4987        mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4988                 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4989err:
4990        return err;
4991}
4992
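    /*
     * Walk the RES_FS_RULE tree: when bonding, mirror every original rule
     * (those that kept an attach mailbox); when unbonding, undo every mirror
     * copy (those without one).  Per-rule failures are accumulated into the
     * return value rather than aborting the walk.
     */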
4993static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
4994{
4995        struct mlx4_priv *priv = mlx4_priv(dev);
4996        struct mlx4_resource_tracker *tracker =
4997                &priv->mfunc.master.res_tracker;
4998        struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
4999        struct rb_node *p;
5000        struct res_fs_rule *fs_rule;
5001        int err = 0;
5002        LIST_HEAD(mirr_list);
5003
5004        for (p = rb_first(root); p; p = rb_next(p)) {
5005                fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5006                if ((bond && fs_rule->mirr_mbox_size) ||
5007                    (!bond && !fs_rule->mirr_mbox_size))
5008                        list_add_tail(&fs_rule->mirr_list, &mirr_list);
5009        }
5010
5011        list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5012                if (bond)
5013                        err += mlx4_do_mirror_rule(dev, fs_rule);
5014                else
5015                        err += mlx4_undo_mirror_rule(dev, fs_rule);
5016        }
5017        return err;
5018}
5019
5020int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5021{
5022        return mlx4_mirror_fs_rules(dev, true);
5023}
5024
5025int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5026{
5027        return mlx4_mirror_fs_rules(dev, false);
5028}
5029
5030static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5031{
5032        struct mlx4_priv *priv = mlx4_priv(dev);
5033        struct mlx4_resource_tracker *tracker =
5034                &priv->mfunc.master.res_tracker;
5035        struct list_head *fs_rule_list =
5036                &tracker->slave_list[slave].res_list[RES_FS_RULE];
5037        struct res_fs_rule *fs_rule;
5038        struct res_fs_rule *tmp;
5039        int state;
5040        u64 base;
5041        int err;
5042
5043        err = move_all_busy(dev, slave, RES_FS_RULE);
5044        if (err)
5045                mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
5046                          slave);
5047
5048        spin_lock_irq(mlx4_tlock(dev));
5049        list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5050                spin_unlock_irq(mlx4_tlock(dev));
5051                if (fs_rule->com.owner == slave) {
5052                        base = fs_rule->com.res_id;
5053                        state = fs_rule->com.from_state;
5054                        while (state != 0) {
5055                                switch (state) {
5056                                case RES_FS_RULE_ALLOCATED:
5057                                        /* detach rule */
5058                                        err = mlx4_cmd(dev, base, 0, 0,
5059                                                       MLX4_QP_FLOW_STEERING_DETACH,
5060                                                       MLX4_CMD_TIME_CLASS_A,
5061                                                       MLX4_CMD_NATIVE);
5062
5063                                        spin_lock_irq(mlx4_tlock(dev));
5064                                        rb_erase(&fs_rule->com.node,
5065                                                 &tracker->res_tree[RES_FS_RULE]);
5066                                        list_del(&fs_rule->com.list);
5067                                        spin_unlock_irq(mlx4_tlock(dev));
5068                                        kfree(fs_rule);
5069                                        state = 0;
5070                                        break;
5071
5072                                default:
5073                                        state = 0;
5074                                }
5075                        }
5076                }
5077                spin_lock_irq(mlx4_tlock(dev));
5078        }
5079        spin_unlock_irq(mlx4_tlock(dev));
5080}
5081
5082static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5083{
5084        struct mlx4_priv *priv = mlx4_priv(dev);
5085        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5086        struct list_head *eq_list =
5087                &tracker->slave_list[slave].res_list[RES_EQ];
5088        struct res_eq *eq;
5089        struct res_eq *tmp;
5090        int err;
5091        int state;
5092        LIST_HEAD(tlist);
5093        int eqn;
5094
5095        err = move_all_busy(dev, slave, RES_EQ);
5096        if (err)
5097                mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5098                          slave);
5099
5100        spin_lock_irq(mlx4_tlock(dev));
5101        list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5102                spin_unlock_irq(mlx4_tlock(dev));
5103                if (eq->com.owner == slave) {
5104                        eqn = eq->com.res_id;
5105                        state = eq->com.from_state;
5106                        while (state != 0) {
5107                                switch (state) {
5108                                case RES_EQ_RESERVED:
5109                                        spin_lock_irq(mlx4_tlock(dev));
5110                                        rb_erase(&eq->com.node,
5111                                                 &tracker->res_tree[RES_EQ]);
5112                                        list_del(&eq->com.list);
5113                                        spin_unlock_irq(mlx4_tlock(dev));
5114                                        kfree(eq);
5115                                        state = 0;
5116                                        break;
5117
5118                                case RES_EQ_HW:
5119                                        err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5120                                                       1, MLX4_CMD_HW2SW_EQ,
5121                                                       MLX4_CMD_TIME_CLASS_A,
5122                                                       MLX4_CMD_NATIVE);
5123                                        if (err)
5124                                                mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5125                                                         slave, eqn & 0x3ff);
5126                                        atomic_dec(&eq->mtt->ref_count);
5127                                        state = RES_EQ_RESERVED;
5128                                        break;
5129
5130                                default:
5131                                        state = 0;
5132                                }
5133                        }
5134                }
5135                spin_lock_irq(mlx4_tlock(dev));
5136        }
5137        spin_unlock_irq(mlx4_tlock(dev));
5138}
5139
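    /*
     * Counters are freed in batches: tracker entries owned by the slave are
     * unlinked under the lock into a local index array, then each index is
     * freed and its quota returned outside the lock.  The outer loop repeats
     * until a pass finds nothing left to remove.
     */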
5140static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5141{
5142        struct mlx4_priv *priv = mlx4_priv(dev);
5143        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5144        struct list_head *counter_list =
5145                &tracker->slave_list[slave].res_list[RES_COUNTER];
5146        struct res_counter *counter;
5147        struct res_counter *tmp;
5148        int err;
5149        int *counters_arr = NULL;
5150        int i, j;
5151
5152        err = move_all_busy(dev, slave, RES_COUNTER);
5153        if (err)
5154                mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5155                          slave);
5156
5157        counters_arr = kmalloc_array(dev->caps.max_counters,
5158                                     sizeof(*counters_arr), GFP_KERNEL);
5159        if (!counters_arr)
5160                return;
5161
5162        do {
5163                i = 0;
5164                j = 0;
5165                spin_lock_irq(mlx4_tlock(dev));
5166                list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5167                        if (counter->com.owner == slave) {
5168                                counters_arr[i++] = counter->com.res_id;
5169                                rb_erase(&counter->com.node,
5170                                         &tracker->res_tree[RES_COUNTER]);
5171                                list_del(&counter->com.list);
5172                                kfree(counter);
5173                        }
5174                }
5175                spin_unlock_irq(mlx4_tlock(dev));
5176
5177                while (j < i) {
5178                        __mlx4_counter_free(dev, counters_arr[j++]);
5179                        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5180                }
5181        } while (i);
5182
5183        kfree(counters_arr);
5184}
5185
5186static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5187{
5188        struct mlx4_priv *priv = mlx4_priv(dev);
5189        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5190        struct list_head *xrcdn_list =
5191                &tracker->slave_list[slave].res_list[RES_XRCD];
5192        struct res_xrcdn *xrcd;
5193        struct res_xrcdn *tmp;
5194        int err;
5195        int xrcdn;
5196
5197        err = move_all_busy(dev, slave, RES_XRCD);
5198        if (err)
5199                mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5200                          slave);
5201
5202        spin_lock_irq(mlx4_tlock(dev));
5203        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5204                if (xrcd->com.owner == slave) {
5205                        xrcdn = xrcd->com.res_id;
5206                        rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5207                        list_del(&xrcd->com.list);
5208                        kfree(xrcd);
5209                        __mlx4_xrcd_free(dev, xrcdn);
5210                }
5211        }
5212        spin_unlock_irq(mlx4_tlock(dev));
5213}
5214
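    /*
     * Full cleanup entry point used when a slave (VF) goes away: reset its
     * RoCE GIDs, then release every tracked resource type in dependency
     * order (VLANs, MACs and flow rules before QPs, then SRQs, CQs, MRs,
     * EQs, MTTs, counters and XRC domains), all under the slave's tracker
     * mutex.
     */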
5215void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5216{
5217        struct mlx4_priv *priv = mlx4_priv(dev);
5218        mlx4_reset_roce_gids(dev, slave);
5219        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5220        rem_slave_vlans(dev, slave);
5221        rem_slave_macs(dev, slave);
5222        rem_slave_fs_rule(dev, slave);
5223        rem_slave_qps(dev, slave);
5224        rem_slave_srqs(dev, slave);
5225        rem_slave_cqs(dev, slave);
5226        rem_slave_mrs(dev, slave);
5227        rem_slave_eqs(dev, slave);
5228        rem_slave_mtts(dev, slave);
5229        rem_slave_counters(dev, slave);
5230        rem_slave_xrcdns(dev, slave);
5231        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5232}
5233
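    /*
     * Deferred work that applies an immediate VST VLAN/QoS change to all
     * eligible QPs of a VF: a vlan_control policy is chosen from the work
     * flags and VLAN protocol, and for every RES_QP_HW QP on the affected
     * port an UPDATE_QP command rewrites the primary path (VLAN index,
     * vlan_control, sched_queue and, for non-VGT, the QoS vport).  Reserved
     * and RSS QPs are skipped; failures are counted and reported but do not
     * stop the scan.
     */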
5234void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5235{
5236        struct mlx4_vf_immed_vlan_work *work =
5237                container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5238        struct mlx4_cmd_mailbox *mailbox;
5239        struct mlx4_update_qp_context *upd_context;
5240        struct mlx4_dev *dev = &work->priv->dev;
5241        struct mlx4_resource_tracker *tracker =
5242                &work->priv->mfunc.master.res_tracker;
5243        struct list_head *qp_list =
5244                &tracker->slave_list[work->slave].res_list[RES_QP];
5245        struct res_qp *qp;
5246        struct res_qp *tmp;
5247        u64 qp_path_mask_vlan_ctrl =
5248                       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5249                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5250                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5251                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5252                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5253                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5254
5255        u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5256                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5257                       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5258                       (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5259                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5260                       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5261                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5262                       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5263
5264        int err;
5265        int port, errors = 0;
5266        u8 vlan_control;
5267
5268        if (mlx4_is_slave(dev)) {
5269                mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5270                          work->slave);
5271                goto out;
5272        }
5273
5274        mailbox = mlx4_alloc_cmd_mailbox(dev);
5275        if (IS_ERR(mailbox))
5276                goto out;
5277        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5278                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5279                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5280                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5281                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5282                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5283                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5284        else if (!work->vlan_id)
5285                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5286                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5287        else if (work->vlan_proto == htons(ETH_P_8021AD))
5288                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5289                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5290                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5291                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5292        else  /* vst 802.1Q */
5293                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5294                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5295                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5296
5297        upd_context = mailbox->buf;
5298        upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5299
5300        spin_lock_irq(mlx4_tlock(dev));
5301        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5302                spin_unlock_irq(mlx4_tlock(dev));
5303                if (qp->com.owner == work->slave) {
5304                        if (qp->com.from_state != RES_QP_HW ||
5305                            !qp->sched_queue ||  /* no INIT2RTR trans yet */
5306                            mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5307                            qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5308                                spin_lock_irq(mlx4_tlock(dev));
5309                                continue;
5310                        }
5311                        port = (qp->sched_queue >> 6 & 1) + 1;
5312                        if (port != work->port) {
5313                                spin_lock_irq(mlx4_tlock(dev));
5314                                continue;
5315                        }
5316                        if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5317                                upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5318                        else
5319                                upd_context->primary_addr_path_mask =
5320                                        cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5321                        if (work->vlan_id == MLX4_VGT) {
5322                                upd_context->qp_context.param3 = qp->param3;
5323                                upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5324                                upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5325                                upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5326                                upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5327                                upd_context->qp_context.pri_path.feup = qp->feup;
5328                                upd_context->qp_context.pri_path.sched_queue =
5329                                        qp->sched_queue;
5330                        } else {
5331                                upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5332                                upd_context->qp_context.pri_path.vlan_control = vlan_control;
5333                                upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5334                                upd_context->qp_context.pri_path.fvl_rx =
5335                                        qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5336                                upd_context->qp_context.pri_path.fl =
5337                                        qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5338                                if (work->vlan_proto == htons(ETH_P_8021AD))
5339                                        upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5340                                else
5341                                        upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5342                                upd_context->qp_context.pri_path.feup =
5343                                        qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5344                                upd_context->qp_context.pri_path.sched_queue =
5345                                        qp->sched_queue & 0xC7;
5346                                upd_context->qp_context.pri_path.sched_queue |=
5347                                        ((work->qos & 0x7) << 3);
5348                                upd_context->qp_mask |=
5349                                        cpu_to_be64(1ULL <<
5350                                                    MLX4_UPD_QP_MASK_QOS_VPP);
5351                                upd_context->qp_context.qos_vport =
5352                                        work->qos_vport;
5353                        }
5354
5355                        err = mlx4_cmd(dev, mailbox->dma,
5356                                       qp->local_qpn & 0xffffff,
5357                                       0, MLX4_CMD_UPDATE_QP,
5358                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5359                        if (err) {
5360                                mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5361                                          work->slave, port, qp->local_qpn, err);
5362                                errors++;
5363                        }
5364                }
5365                spin_lock_irq(mlx4_tlock(dev));
5366        }
5367        spin_unlock_irq(mlx4_tlock(dev));
5368        mlx4_free_cmd_mailbox(dev, mailbox);
5369
5370        if (errors)
5371                mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5372                         errors, work->slave, work->port);
5373
5374        /* unregister previous vlan_id if needed and we had no errors
5375         * while updating the QPs
5376         */
5377        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5378            NO_INDX != work->orig_vlan_ix)
5379                __mlx4_unregister_vlan(&work->priv->dev, work->port,
5380                                       work->orig_vlan_id);
5381out:
5382        kfree(work);
5383        return;
5384}
5385