linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

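/*
 * Resource tracker: bookkeeping for HCA resources (QPs, CQs, SRQs, MPTs,
 * MTTs, EQs, counters, XRC domains, MACs, VLANs and flow steering rules)
 * on behalf of the slave functions (PF and VFs).  The master uses it to
 * enforce per-function quotas, to validate wrapped FW commands against
 * resource ownership and state, and to reclaim everything a slave owns
 * when that slave resets or shuts down.
 */
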
#define MLX4_MAC_VALID          (1ull << 63)

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

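/*
 * Common header of every tracked resource: the rb_node links the entry
 * into the per-type tree (keyed by res_id), the list_head links it into
 * the owning slave's per-type list, and state/from_state/to_state drive
 * the BUSY-based state machine used by get_res()/put_res() and the
 * *_res_start_move_to() helpers below.
 */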
struct res_common {
        struct list_head        list;
        struct rb_node          node;
        u64                     res_id;
        int                     owner;
        int                     state;
        int                     from_state;
        int                     to_state;
        int                     removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head        list;
        u8                      gid[16];
        enum mlx4_protocol      prot;
        enum mlx4_steer_type    steer;
        u64                     reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *rcq;
        struct res_cq          *scq;
        struct res_srq         *srq;
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
        atomic_t                ref_count;
        u32                     qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8                      sched_queue;
        __be32                  param3;
        u8                      vlan_control;
        u8                      fvl_rx;
        u8                      pri_path_fl;
        u8                      vlan_index;
        u8                      feup;
};

enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common       com;
        int                     order;
        atomic_t                ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common       com;
        struct res_mtt         *mtt;
        int                     key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common       com;
        struct res_mtt         *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common       com;
        struct res_mtt         *mtt;
        atomic_t                ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common       com;
        struct res_mtt         *mtt;
        struct res_cq          *cq;
        atomic_t                ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common       com;
        int                     port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common       com;
        int                     port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common       com;
        int                     qpn;
};

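/*
 * Every tracked resource of a given type lives in a single rb-tree,
 * keyed by its res_id; lookup and insertion are the standard rbtree
 * search/link/rebalance pattern.
 */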
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}

enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug purposes */
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
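
/*
 * Charge @count resources of @res_type (on @port when the type is
 * per-port) to @slave.  Up to the slave's "guaranteed" amount is taken
 * from the reserved pool; anything above it comes from the shared free
 * pool and is refused if granting it would eat into the outstanding
 * guarantees of other functions (free - from_free < reserved).  The
 * hard cap is the slave's quota.  Example: guaranteed = 4,
 * allocated = 2, count = 5 -> from_free = 3, from_rsvd = 2.
 */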
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}

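/*
 * Return @count previously granted resources; the portion that brings
 * the slave back below its guaranteed share goes back to the reserved
 * pool, the rest to the shared free pool.
 */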
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                    enum mlx4_resource res_type, int count,
                                    int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
}

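/*
 * Default quota scheme: each function is guaranteed
 * num_instances / (2 * (num_vfs + 1)) instances, so the guarantees of
 * all functions together consume at most half the pool, and any single
 * function may allocate at most half the pool plus its own guarantee.
 * The PF additionally absorbs the reserved MTTs on top of its MTT
 * free count, guarantee and quota.
 */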
static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
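
/*
 * Set up the master-side tracker: a per-slave list of owned resources
 * for each resource type, one rb-tree per type, and a resource
 * allocator per type with quota/guaranteed/allocated arrays sized for
 * num_vfs + 1 functions (per port for MACs and VLANs).
 */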
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
                                           sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
                                                sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->persist->num_vfs
                                                       + 1) *
                                                       sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->persist->
                                                        num_vfs + 1) *
                                                       sizeof(int), GFP_KERNEL);

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port for
                                         * both ports.
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                res_alloc->guaranteed[t] = 0;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->res_free = res_alloc->quota[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}

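/*
 * Replace the pkey index a guest wrote into a QP-transition command
 * mailbox with the physical index from the master's virt2phys_pkey
 * table.  Byte 64 of the mailbox holds the schedule queue (bit 6
 * selects the port) and byte 35 holds the primary path pkey index.
 */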
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

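/*
 * Rewrite the GID index in a slave's QP context so it falls within the
 * GID range the master assigned to that slave: UD QPs take the slave's
 * base GID index directly, while RC/UC/XRC QPs have their primary (and,
 * when present, alternate) path mgid_index offset by that base.
 */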
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}

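/*
 * Apply the admin-configured VST settings to a QP context a slave is
 * transitioning: when a default vlan is set (state != VGT), force vlan
 * insertion/stripping, pick the operational vlan index and QoS, and
 * program vlan_control according to link state and priority tagging;
 * when spoof checking is on, force the source MAC to the admin MAC.
 */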
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context  *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port, err = 0;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force vlan stripping by clearing VSD; an MLX QP here means raw Ethernet */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                                if (err)
                                        goto out;
                        }
                }

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
                qpc->qos_vport = vp_oper->state.qos_vport;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
out:
        return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}

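/*
 * Take exclusive hold of a tracked resource: verify ownership, remember
 * the current state and park the entry in RES_ANY_BUSY so concurrent
 * wrappers back off with -EBUSY.  put_res() drops the hold by restoring
 * the saved state.
 */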
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENOENT;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}

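/*
 * Allocate a tracker entry of the requested type; "extra" carries the
 * type-specific argument (MPT key, MTT order, flow rule QPN).  The new
 * entry is stamped with the owning slave.
 */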
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}

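/*
 * Register the id range [base, base + count) of the given type to a
 * slave: allocate all tracker entries up front, then insert them into
 * the type's rb-tree and the slave's list atomically under the tracker
 * lock, unwinding completely if any id is already taken.
 */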
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof(*res_arr), GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}

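/*
 * remove_*_ok(): per-type check that an entry may be deleted -- not
 * BUSY, sitting in its initial allocated/reserved state, and with no
 * remaining references.
 */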
static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}

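/*
 * Unregister an id range: first verify ownership and removability for
 * the whole range, then delete -- all or nothing, under a single hold
 * of the tracker lock.
 */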
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

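/*
 * *_res_start_move_to(): begin a state transition.  The target state is
 * validated against the current one, then the entry is parked in the
 * per-type BUSY state with from_state/to_state recorded so the caller
 * can commit with res_end_move() or roll back with res_abort_move()
 * once the FW command completes or fails.
 */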
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_CQ_ALLOCATED) {
                if (r->com.state != RES_CQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
                else
                        err = 0;
        } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
                err = -EINVAL;
        } else {
                err = 0;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_CQ_BUSY;
                if (cq)
                        *cq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_SRQ_ALLOCATED) {
                if (r->com.state != RES_SRQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
        } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
                err = -EINVAL;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_SRQ_BUSY;
                if (srq)
                        *srq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

1508static void res_abort_move(struct mlx4_dev *dev, int slave,
1509                           enum mlx4_resource type, int id)
1510{
1511        struct mlx4_priv *priv = mlx4_priv(dev);
1512        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1513        struct res_common *r;
1514
1515        spin_lock_irq(mlx4_tlock(dev));
1516        r = res_tracker_lookup(&tracker->res_tree[type], id);
1517        if (r && (r->owner == slave))
1518                r->state = r->from_state;
1519        spin_unlock_irq(mlx4_tlock(dev));
1520}
1521
1522static void res_end_move(struct mlx4_dev *dev, int slave,
1523                         enum mlx4_resource type, int id)
1524{
1525        struct mlx4_priv *priv = mlx4_priv(dev);
1526        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1527        struct res_common *r;
1528
1529        spin_lock_irq(mlx4_tlock(dev));
1530        r = res_tracker_lookup(&tracker->res_tree[type], id);
1531        if (r && (r->owner == slave))
1532                r->state = r->to_state;
1533        spin_unlock_irq(mlx4_tlock(dev));
1534}
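
/* Editor's note: the *_res_start_move_to()/res_end_move()/
 * res_abort_move() trio implements a two-phase state transition: the
 * start call validates the transition under the tracker lock and parks
 * the resource in its *_BUSY state, recording from_state and to_state;
 * the caller then issues the FW command outside the lock and either
 * commits with res_end_move() or rolls back with res_abort_move().
 * A minimal caller sketch (hypothetical id):
 *
 *	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
 *	if (err)
 *		return err;
 *	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
 *	if (err)
 *		res_abort_move(dev, slave, RES_MPT, id);
 *	else
 *		res_end_move(dev, slave, RES_MPT, id);
 */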
1535
1536static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1537{
1538        return mlx4_is_qp_reserved(dev, qpn) &&
1539                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1540}
1541
1542static int fw_reserved(struct mlx4_dev *dev, int qpn)
1543{
1544        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1545}
1546
1547static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1548                        u64 in_param, u64 *out_param)
1549{
1550        int err;
1551        int count;
1552        int align;
1553        int base;
1554        int qpn;
1555        u8 flags;
1556
1557        switch (op) {
1558        case RES_OP_RESERVE:
1559                count = get_param_l(&in_param) & 0xffffff;
1560                /* Turn off all unsupported QP allocation flags that the
1561                 * slave tries to set.
1562                 */
1563                flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1564                align = get_param_h(&in_param);
1565                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1566                if (err)
1567                        return err;
1568
1569                err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1570                if (err) {
1571                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
1572                        return err;
1573                }
1574
1575                err = add_res_range(dev, slave, base, count, RES_QP, 0);
1576                if (err) {
1577                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
1578                        __mlx4_qp_release_range(dev, base, count);
1579                        return err;
1580                }
1581                set_param_l(out_param, base);
1582                break;
1583        case RES_OP_MAP_ICM:
1584                qpn = get_param_l(&in_param) & 0x7fffff;
1585                if (valid_reserved(dev, slave, qpn)) {
1586                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1587                        if (err)
1588                                return err;
1589                }
1590
1591                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1592                                           NULL, 1);
1593                if (err)
1594                        return err;
1595
1596                if (!fw_reserved(dev, qpn)) {
1597                        err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1598                        if (err) {
1599                                res_abort_move(dev, slave, RES_QP, qpn);
1600                                return err;
1601                        }
1602                }
1603
1604                res_end_move(dev, slave, RES_QP, qpn);
1605                break;
1606
1607        default:
1608                err = -EINVAL;
1609                break;
1610        }
1611        return err;
1612}
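
/* Editor's note on the RES_OP_RESERVE encoding above: the low dword of
 * in_param packs the QP count in bits 0..23 and the allocation flags in
 * bits 24..31, while the high dword carries the alignment.  For example
 * (hypothetical values), a slave reserving 8 QPs aligned to 8 with no
 * flags would pass in_param = ((u64)8 << 32) | 8, and the reserved base
 * QPN comes back in the low dword of out_param.
 */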
1613
1614static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1615                         u64 in_param, u64 *out_param)
1616{
1617        int err = -EINVAL;
1618        int base;
1619        int order;
1620
1621        if (op != RES_OP_RESERVE_AND_MAP)
1622                return err;
1623
1624        order = get_param_l(&in_param);
1625
1626        err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1627        if (err)
1628                return err;
1629
1630        base = __mlx4_alloc_mtt_range(dev, order);
1631        if (base == -1) {
1632                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1633                return -ENOMEM;
1634        }
1635
1636        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1637        if (err) {
1638                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1639                __mlx4_free_mtt_range(dev, base, order);
1640        } else {
1641                set_param_l(out_param, base);
1642        }
1643
1644        return err;
1645}
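
/* Editor's note: for MTTs the "order" is log2 of the segment size, so a
 * single tracked range of order n costs 1 << n entries of the slave's
 * MTT quota; e.g. order 4 reserves and maps 16 MTT entries, with the
 * range base returned in the low dword of out_param.
 */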
1646
1647static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1648                         u64 in_param, u64 *out_param)
1649{
1650        int err = -EINVAL;
1651        int index;
1652        int id;
1653        struct res_mpt *mpt;
1654
1655        switch (op) {
1656        case RES_OP_RESERVE:
1657                err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1658                if (err)
1659                        break;
1660
1661                index = __mlx4_mpt_reserve(dev);
1662                if (index == -1) {
1663                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1664                        break;
1665                }
1666                id = index & mpt_mask(dev);
1667
1668                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1669                if (err) {
1670                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1671                        __mlx4_mpt_release(dev, index);
1672                        break;
1673                }
1674                set_param_l(out_param, index);
1675                break;
1676        case RES_OP_MAP_ICM:
1677                index = get_param_l(&in_param);
1678                id = index & mpt_mask(dev);
1679                err = mr_res_start_move_to(dev, slave, id,
1680                                           RES_MPT_MAPPED, &mpt);
1681                if (err)
1682                        return err;
1683
1684                err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1685                if (err) {
1686                        res_abort_move(dev, slave, RES_MPT, id);
1687                        return err;
1688                }
1689
1690                res_end_move(dev, slave, RES_MPT, id);
1691                break;
1692        }
1693        return err;
1694}
1695
1696static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1697                        u64 in_param, u64 *out_param)
1698{
1699        int cqn;
1700        int err;
1701
1702        switch (op) {
1703        case RES_OP_RESERVE_AND_MAP:
1704                err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1705                if (err)
1706                        break;
1707
1708                err = __mlx4_cq_alloc_icm(dev, &cqn);
1709                if (err) {
1710                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1711                        break;
1712                }
1713
1714                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1715                if (err) {
1716                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1717                        __mlx4_cq_free_icm(dev, cqn);
1718                        break;
1719                }
1720
1721                set_param_l(out_param, cqn);
1722                break;
1723
1724        default:
1725                err = -EINVAL;
1726        }
1727
1728        return err;
1729}
1730
1731static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1732                         u64 in_param, u64 *out_param)
1733{
1734        int srqn;
1735        int err;
1736
1737        switch (op) {
1738        case RES_OP_RESERVE_AND_MAP:
1739                err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1740                if (err)
1741                        break;
1742
1743                err = __mlx4_srq_alloc_icm(dev, &srqn);
1744                if (err) {
1745                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1746                        break;
1747                }
1748
1749                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1750                if (err) {
1751                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1752                        __mlx4_srq_free_icm(dev, srqn);
1753                        break;
1754                }
1755
1756                set_param_l(out_param, srqn);
1757                break;
1758
1759        default:
1760                err = -EINVAL;
1761        }
1762
1763        return err;
1764}
1765
1766static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1767                                     u8 smac_index, u64 *mac)
1768{
1769        struct mlx4_priv *priv = mlx4_priv(dev);
1770        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1771        struct list_head *mac_list =
1772                &tracker->slave_list[slave].res_list[RES_MAC];
1773        struct mac_res *res, *tmp;
1774
1775        list_for_each_entry_safe(res, tmp, mac_list, list) {
1776                if (res->smac_index == smac_index && res->port == (u8) port) {
1777                        *mac = res->mac;
1778                        return 0;
1779                }
1780        }
1781        return -ENOENT;
1782}
1783
1784static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1785{
1786        struct mlx4_priv *priv = mlx4_priv(dev);
1787        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1788        struct list_head *mac_list =
1789                &tracker->slave_list[slave].res_list[RES_MAC];
1790        struct mac_res *res, *tmp;
1791
1792        list_for_each_entry_safe(res, tmp, mac_list, list) {
1793                if (res->mac == mac && res->port == (u8) port) {
1794                        /* mac found. update ref count */
1795                        ++res->ref_count;
1796                        return 0;
1797                }
1798        }
1799
1800        if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1801                return -EINVAL;
1802        res = kzalloc(sizeof(*res), GFP_KERNEL);
1803        if (!res) {
1804                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1805                return -ENOMEM;
1806        }
1807        res->mac = mac;
1808        res->port = (u8) port;
1809        res->smac_index = smac_index;
1810        res->ref_count = 1;
1811        list_add_tail(&res->list,
1812                      &tracker->slave_list[slave].res_list[RES_MAC]);
1813        return 0;
1814}
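
/* Editor's note: the per-slave MAC list is reference counted by
 * (mac, port) pair; registering the same MAC on the same port again
 * only bumps ref_count, so mac_del_from_slave() below frees the entry
 * and returns the quota only when the last reference goes away, and
 * rem_slave_macs() unregisters the MAC once per outstanding reference.
 */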
1815
1816static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1817                               int port)
1818{
1819        struct mlx4_priv *priv = mlx4_priv(dev);
1820        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1821        struct list_head *mac_list =
1822                &tracker->slave_list[slave].res_list[RES_MAC];
1823        struct mac_res *res, *tmp;
1824
1825        list_for_each_entry_safe(res, tmp, mac_list, list) {
1826                if (res->mac == mac && res->port == (u8) port) {
1827                        if (!--res->ref_count) {
1828                                list_del(&res->list);
1829                                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1830                                kfree(res);
1831                        }
1832                        break;
1833                }
1834        }
1835}
1836
1837static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1838{
1839        struct mlx4_priv *priv = mlx4_priv(dev);
1840        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1841        struct list_head *mac_list =
1842                &tracker->slave_list[slave].res_list[RES_MAC];
1843        struct mac_res *res, *tmp;
1844        int i;
1845
1846        list_for_each_entry_safe(res, tmp, mac_list, list) {
1847                list_del(&res->list);
1848                /* dereference the MAC the number of times the slave referenced it */
1849                for (i = 0; i < res->ref_count; i++)
1850                        __mlx4_unregister_mac(dev, res->port, res->mac);
1851                mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1852                kfree(res);
1853        }
1854}
1855
1856static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1857                         u64 in_param, u64 *out_param, int in_port)
1858{
1859        int err = -EINVAL;
1860        int port;
1861        u64 mac;
1862        u8 smac_index;
1863
1864        if (op != RES_OP_RESERVE_AND_MAP)
1865                return err;
1866
1867        port = !in_port ? get_param_l(out_param) : in_port;
1868        port = mlx4_slave_convert_port(dev, slave, port);
1870
1871        if (port < 0)
1872                return -EINVAL;
1873        mac = in_param;
1874
1875        err = __mlx4_register_mac(dev, port, mac);
1876        if (err >= 0) {
1877                smac_index = err;
1878                set_param_l(out_param, err);
1879                err = 0;
1880        }
1881
1882        if (!err) {
1883                err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1884                if (err)
1885                        __mlx4_unregister_mac(dev, port, mac);
1886        }
1887        return err;
1888}
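
/* Editor's note: __mlx4_register_mac() returns the allocated MAC-table
 * (SMAC) index as a non-negative value, which is both recorded in the
 * slave's MAC list and handed back to the VF through the low dword of
 * out_param; a negative return is passed up as the error.
 */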
1889
1890static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1891                             int port, int vlan_index)
1892{
1893        struct mlx4_priv *priv = mlx4_priv(dev);
1894        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1895        struct list_head *vlan_list =
1896                &tracker->slave_list[slave].res_list[RES_VLAN];
1897        struct vlan_res *res, *tmp;
1898
1899        list_for_each_entry_safe(res, tmp, vlan_list, list) {
1900                if (res->vlan == vlan && res->port == (u8) port) {
1901                        /* vlan found. update ref count */
1902                        ++res->ref_count;
1903                        return 0;
1904                }
1905        }
1906
1907        if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1908                return -EINVAL;
1909        res = kzalloc(sizeof(*res), GFP_KERNEL);
1910        if (!res) {
1911                mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1912                return -ENOMEM;
1913        }
1914        res->vlan = vlan;
1915        res->port = (u8) port;
1916        res->vlan_index = vlan_index;
1917        res->ref_count = 1;
1918        list_add_tail(&res->list,
1919                      &tracker->slave_list[slave].res_list[RES_VLAN]);
1920        return 0;
1921}
1922
1924static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1925                                int port)
1926{
1927        struct mlx4_priv *priv = mlx4_priv(dev);
1928        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1929        struct list_head *vlan_list =
1930                &tracker->slave_list[slave].res_list[RES_VLAN];
1931        struct vlan_res *res, *tmp;
1932
1933        list_for_each_entry_safe(res, tmp, vlan_list, list) {
1934                if (res->vlan == vlan && res->port == (u8) port) {
1935                        if (!--res->ref_count) {
1936                                list_del(&res->list);
1937                                mlx4_release_resource(dev, slave, RES_VLAN,
1938                                                      1, port);
1939                                kfree(res);
1940                        }
1941                        break;
1942                }
1943        }
1944}
1945
1946static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1947{
1948        struct mlx4_priv *priv = mlx4_priv(dev);
1949        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1950        struct list_head *vlan_list =
1951                &tracker->slave_list[slave].res_list[RES_VLAN];
1952        struct vlan_res *res, *tmp;
1953        int i;
1954
1955        list_for_each_entry_safe(res, tmp, vlan_list, list) {
1956                list_del(&res->list);
1957                /* dereference the VLAN the number of times the slave referenced it */
1958                for (i = 0; i < res->ref_count; i++)
1959                        __mlx4_unregister_vlan(dev, res->port, res->vlan);
1960                mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1961                kfree(res);
1962        }
1963}
1964
1965static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1966                          u64 in_param, u64 *out_param, int in_port)
1967{
1968        struct mlx4_priv *priv = mlx4_priv(dev);
1969        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1970        int err;
1971        u16 vlan;
1972        int vlan_index;
1973        int port;
1974
1975        port = !in_port ? get_param_l(out_param) : in_port;
1976
1977        if (!port || op != RES_OP_RESERVE_AND_MAP)
1978                return -EINVAL;
1979
1980        port = mlx4_slave_convert_port(dev, slave, port);
1982
1983        if (port < 0)
1984                return -EINVAL;
1985        /* Older upstream kernels treated vlan reg/unreg as a NOP; preserve that behavior. */
1986        if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1987                slave_state[slave].old_vlan_api = true;
1988                return 0;
1989        }
1990
1991        vlan = (u16) in_param;
1992
1993        err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1994        if (!err) {
1995                set_param_l(out_param, (u32) vlan_index);
1996                err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1997                if (err)
1998                        __mlx4_unregister_vlan(dev, port, vlan);
1999        }
2000        return err;
2001}
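
/* Editor's note: a slave that passes no port in the in_modifier
 * (in_port == 0) is using the old VLAN API, for which registration was
 * a NOP; the old_vlan_api flag recorded above makes the matching
 * vlan_free_res() call a NOP for that slave as well, keeping the two
 * paths symmetric.
 */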
2002
2003static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2004                             u64 in_param, u64 *out_param)
2005{
2006        u32 index;
2007        int err;
2008
2009        if (op != RES_OP_RESERVE)
2010                return -EINVAL;
2011
2012        err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2013        if (err)
2014                return err;
2015
2016        err = __mlx4_counter_alloc(dev, &index);
2017        if (err) {
2018                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2019                return err;
2020        }
2021
2022        err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2023        if (err) {
2024                __mlx4_counter_free(dev, index);
2025                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2026        } else {
2027                set_param_l(out_param, index);
2028        }
2029
2030        return err;
2031}
2032
2033static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2034                           u64 in_param, u64 *out_param)
2035{
2036        u32 xrcdn;
2037        int err;
2038
2039        if (op != RES_OP_RESERVE)
2040                return -EINVAL;
2041
2042        err = __mlx4_xrcd_alloc(dev, &xrcdn);
2043        if (err)
2044                return err;
2045
2046        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2047        if (err)
2048                __mlx4_xrcd_free(dev, xrcdn);
2049        else
2050                set_param_l(out_param, xrcdn);
2051
2052        return err;
2053}
2054
2055int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2056                           struct mlx4_vhcr *vhcr,
2057                           struct mlx4_cmd_mailbox *inbox,
2058                           struct mlx4_cmd_mailbox *outbox,
2059                           struct mlx4_cmd_info *cmd)
2060{
2061        int err;
2062        int alop = vhcr->op_modifier;
2063
2064        switch (vhcr->in_modifier & 0xFF) {
2065        case RES_QP:
2066                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2067                                   vhcr->in_param, &vhcr->out_param);
2068                break;
2069
2070        case RES_MTT:
2071                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2072                                    vhcr->in_param, &vhcr->out_param);
2073                break;
2074
2075        case RES_MPT:
2076                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2077                                    vhcr->in_param, &vhcr->out_param);
2078                break;
2079
2080        case RES_CQ:
2081                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2082                                   vhcr->in_param, &vhcr->out_param);
2083                break;
2084
2085        case RES_SRQ:
2086                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2087                                    vhcr->in_param, &vhcr->out_param);
2088                break;
2089
2090        case RES_MAC:
2091                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2092                                    vhcr->in_param, &vhcr->out_param,
2093                                    (vhcr->in_modifier >> 8) & 0xFF);
2094                break;
2095
2096        case RES_VLAN:
2097                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2098                                     vhcr->in_param, &vhcr->out_param,
2099                                     (vhcr->in_modifier >> 8) & 0xFF);
2100                break;
2101
2102        case RES_COUNTER:
2103                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2104                                        vhcr->in_param, &vhcr->out_param);
2105                break;
2106
2107        case RES_XRCD:
2108                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2109                                      vhcr->in_param, &vhcr->out_param);
2110                break;
2111
2112        default:
2113                err = -EINVAL;
2114                break;
2115        }
2116
2117        return err;
2118}
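
/* Editor's note on the ALLOC_RES dispatch above: the low byte of
 * vhcr->in_modifier selects the resource type (RES_QP, RES_MTT, ...),
 * bits 8..15 carry the port for the MAC and VLAN cases, and
 * vhcr->op_modifier carries the RES_OP_* opcode that each helper
 * switches on; mlx4_FREE_RES_wrapper() below decodes the same layout.
 */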
2119
2120static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2121                       u64 in_param)
2122{
2123        int err;
2124        int count;
2125        int base;
2126        int qpn;
2127
2128        switch (op) {
2129        case RES_OP_RESERVE:
2130                base = get_param_l(&in_param) & 0x7fffff;
2131                count = get_param_h(&in_param);
2132                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2133                if (err)
2134                        break;
2135                mlx4_release_resource(dev, slave, RES_QP, count, 0);
2136                __mlx4_qp_release_range(dev, base, count);
2137                break;
2138        case RES_OP_MAP_ICM:
2139                qpn = get_param_l(&in_param) & 0x7fffff;
2140                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2141                                           NULL, 0);
2142                if (err)
2143                        return err;
2144
2145                if (!fw_reserved(dev, qpn))
2146                        __mlx4_qp_free_icm(dev, qpn);
2147
2148                res_end_move(dev, slave, RES_QP, qpn);
2149
2150                if (valid_reserved(dev, slave, qpn))
2151                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2152                break;
2153        default:
2154                err = -EINVAL;
2155                break;
2156        }
2157        return err;
2158}
2159
2160static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2161                        u64 in_param, u64 *out_param)
2162{
2163        int err = -EINVAL;
2164        int base;
2165        int order;
2166
2167        if (op != RES_OP_RESERVE_AND_MAP)
2168                return err;
2169
2170        base = get_param_l(&in_param);
2171        order = get_param_h(&in_param);
2172        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2173        if (!err) {
2174                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2175                __mlx4_free_mtt_range(dev, base, order);
2176        }
2177        return err;
2178}
2179
2180static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2181                        u64 in_param)
2182{
2183        int err = -EINVAL;
2184        int index;
2185        int id;
2186        struct res_mpt *mpt;
2187
2188        switch (op) {
2189        case RES_OP_RESERVE:
2190                index = get_param_l(&in_param);
2191                id = index & mpt_mask(dev);
2192                err = get_res(dev, slave, id, RES_MPT, &mpt);
2193                if (err)
2194                        break;
2195                index = mpt->key;
2196                put_res(dev, slave, id, RES_MPT);
2197
2198                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2199                if (err)
2200                        break;
2201                mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2202                __mlx4_mpt_release(dev, index);
2203                break;
2204        case RES_OP_MAP_ICM:
2205                index = get_param_l(&in_param);
2206                id = index & mpt_mask(dev);
2207                err = mr_res_start_move_to(dev, slave, id,
2208                                           RES_MPT_RESERVED, &mpt);
2209                if (err)
2210                        return err;
2211
2212                __mlx4_mpt_free_icm(dev, mpt->key);
2213                res_end_move(dev, slave, RES_MPT, id);
2214                break;
2216        default:
2217                err = -EINVAL;
2218                break;
2219        }
2220        return err;
2221}
2222
2223static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2224                       u64 in_param, u64 *out_param)
2225{
2226        int cqn;
2227        int err;
2228
2229        switch (op) {
2230        case RES_OP_RESERVE_AND_MAP:
2231                cqn = get_param_l(&in_param);
2232                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2233                if (err)
2234                        break;
2235
2236                mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2237                __mlx4_cq_free_icm(dev, cqn);
2238                break;
2239
2240        default:
2241                err = -EINVAL;
2242                break;
2243        }
2244
2245        return err;
2246}
2247
2248static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2249                        u64 in_param, u64 *out_param)
2250{
2251        int srqn;
2252        int err;
2253
2254        switch (op) {
2255        case RES_OP_RESERVE_AND_MAP:
2256                srqn = get_param_l(&in_param);
2257                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2258                if (err)
2259                        break;
2260
2261                mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2262                __mlx4_srq_free_icm(dev, srqn);
2263                break;
2264
2265        default:
2266                err = -EINVAL;
2267                break;
2268        }
2269
2270        return err;
2271}
2272
2273static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2274                            u64 in_param, u64 *out_param, int in_port)
2275{
2276        int port;
2277        int err = 0;
2278
2279        switch (op) {
2280        case RES_OP_RESERVE_AND_MAP:
2281                port = !in_port ? get_param_l(out_param) : in_port;
2282                port = mlx4_slave_convert_port(dev, slave, port);
2284
2285                if (port < 0)
2286                        return -EINVAL;
2287                mac_del_from_slave(dev, slave, in_param, port);
2288                __mlx4_unregister_mac(dev, port, in_param);
2289                break;
2290        default:
2291                err = -EINVAL;
2292                break;
2293        }
2294
2295        return err;
2296}
2298
2299static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2300                            u64 in_param, u64 *out_param, int port)
2301{
2302        struct mlx4_priv *priv = mlx4_priv(dev);
2303        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2304        int err = 0;
2305
2306        port = mlx4_slave_convert_port(dev, slave, port);
2308
2309        if (port < 0)
2310                return -EINVAL;
2311        switch (op) {
2312        case RES_OP_RESERVE_AND_MAP:
2313                if (slave_state[slave].old_vlan_api)
2314                        return 0;
2315                if (!port)
2316                        return -EINVAL;
2317                vlan_del_from_slave(dev, slave, in_param, port);
2318                __mlx4_unregister_vlan(dev, port, in_param);
2319                break;
2320        default:
2321                err = -EINVAL;
2322                break;
2323        }
2324
2325        return err;
2326}
2327
2328static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2329                            u64 in_param, u64 *out_param)
2330{
2331        int index;
2332        int err;
2333
2334        if (op != RES_OP_RESERVE)
2335                return -EINVAL;
2336
2337        index = get_param_l(&in_param);
2338        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2339        if (err)
2340                return err;
2341
2342        __mlx4_counter_free(dev, index);
2343        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2344
2345        return err;
2346}
2347
2348static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2349                          u64 in_param, u64 *out_param)
2350{
2351        int xrcdn;
2352        int err;
2353
2354        if (op != RES_OP_RESERVE)
2355                return -EINVAL;
2356
2357        xrcdn = get_param_l(&in_param);
2358        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2359        if (err)
2360                return err;
2361
2362        __mlx4_xrcd_free(dev, xrcdn);
2363
2364        return err;
2365}
2366
2367int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2368                          struct mlx4_vhcr *vhcr,
2369                          struct mlx4_cmd_mailbox *inbox,
2370                          struct mlx4_cmd_mailbox *outbox,
2371                          struct mlx4_cmd_info *cmd)
2372{
2373        int err = -EINVAL;
2374        int alop = vhcr->op_modifier;
2375
2376        switch (vhcr->in_modifier & 0xFF) {
2377        case RES_QP:
2378                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2379                                  vhcr->in_param);
2380                break;
2381
2382        case RES_MTT:
2383                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2384                                   vhcr->in_param, &vhcr->out_param);
2385                break;
2386
2387        case RES_MPT:
2388                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2389                                   vhcr->in_param);
2390                break;
2391
2392        case RES_CQ:
2393                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2394                                  vhcr->in_param, &vhcr->out_param);
2395                break;
2396
2397        case RES_SRQ:
2398                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2399                                   vhcr->in_param, &vhcr->out_param);
2400                break;
2401
2402        case RES_MAC:
2403                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2404                                   vhcr->in_param, &vhcr->out_param,
2405                                   (vhcr->in_modifier >> 8) & 0xFF);
2406                break;
2407
2408        case RES_VLAN:
2409                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2410                                    vhcr->in_param, &vhcr->out_param,
2411                                    (vhcr->in_modifier >> 8) & 0xFF);
2412                break;
2413
2414        case RES_COUNTER:
2415                err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2416                                       vhcr->in_param, &vhcr->out_param);
2417                break;
2418
2419        case RES_XRCD:
2420                err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2421                                     vhcr->in_param, &vhcr->out_param);
2422                break;
2423        default:
2424                break;
2425        }
2426        return err;
2427}
2428
2429/* ugly but other choices are uglier */
2430static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2431{
2432        return (be32_to_cpu(mpt->flags) >> 9) & 1;
2433}
2434
2435static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2436{
2437        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2438}
2439
2440static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2441{
2442        return be32_to_cpu(mpt->mtt_sz);
2443}
2444
2445static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2446{
2447        return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2448}
2449
2450static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2451{
2452        return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2453}
2454
2455static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2456{
2457        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2458}
2459
2460static int mr_is_region(struct mlx4_mpt_entry *mpt)
2461{
2462        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2463}
2464
2465static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2466{
2467        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2468}
2469
2470static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2471{
2472        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2473}
2474
2475static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2476{
2477        int page_shift = (qpc->log_page_size & 0x3f) + 12;
2478        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2479        int log_sq_stride = qpc->sq_size_stride & 7;
2480        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2481        int log_rq_stride = qpc->rq_size_stride & 7;
2482        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2483        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2484        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2485        int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2486        int sq_size;
2487        int rq_size;
2488        int total_pages;
2489        int total_mem;
2490        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2491
2492        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2493        rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2494        total_mem = sq_size + rq_size;
2495        total_pages =
2496                roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2497                                   page_shift);
2498
2499        return total_pages;
2500}
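
/* Editor's note, a worked example for qp_get_mtt_size(): with
 * log_sq_size = 10 and a stride field of 2 the SQ occupies
 * 1 << (10 + 2 + 4) = 64KB; if the QP is attached to an SRQ the RQ
 * contributes nothing, so with 4KB pages (page_shift = 12) and a zero
 * page_offset the QP needs roundup_pow_of_two(65536 >> 12) = 16 MTT
 * entries.
 */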
2501
2502static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2503                           int size, struct res_mtt *mtt)
2504{
2505        int res_start = mtt->com.res_id;
2506        int res_size = (1 << mtt->order);
2507
2508        if (start < res_start || start + size > res_start + res_size)
2509                return -EPERM;
2510        return 0;
2511}
2512
2513int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2514                           struct mlx4_vhcr *vhcr,
2515                           struct mlx4_cmd_mailbox *inbox,
2516                           struct mlx4_cmd_mailbox *outbox,
2517                           struct mlx4_cmd_info *cmd)
2518{
2519        int err;
2520        int index = vhcr->in_modifier;
2521        struct res_mtt *mtt;
2522        struct res_mpt *mpt;
2523        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2524        int phys;
2525        int id;
2526        u32 pd;
2527        int pd_slave;
2528
2529        id = index & mpt_mask(dev);
2530        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2531        if (err)
2532                return err;
2533
2534        /* Disable memory windows for VFs. */
2535        if (!mr_is_region(inbox->buf)) {
2536                err = -EPERM;
2537                goto ex_abort;
2538        }
2539
2540        /* Make sure the slave id encoded in the PD bits is zero or matches this slave. */
2541        pd = mr_get_pd(inbox->buf);
2542        pd_slave = (pd >> 17) & 0x7f;
2543        if (pd_slave != 0 && --pd_slave != slave) {
2544                err = -EPERM;
2545                goto ex_abort;
2546        }
2547
2548        if (mr_is_fmr(inbox->buf)) {
2549                /* FMR and Bind Enable are forbidden in slave devices. */
2550                if (mr_is_bind_enabled(inbox->buf)) {
2551                        err = -EPERM;
2552                        goto ex_abort;
2553                }
2554                /* FMR and Memory Windows are also forbidden. */
2555                if (!mr_is_region(inbox->buf)) {
2556                        err = -EPERM;
2557                        goto ex_abort;
2558                }
2559        }
2560
2561        phys = mr_phys_mpt(inbox->buf);
2562        if (!phys) {
2563                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2564                if (err)
2565                        goto ex_abort;
2566
2567                err = check_mtt_range(dev, slave, mtt_base,
2568                                      mr_get_mtt_size(inbox->buf), mtt);
2569                if (err)
2570                        goto ex_put;
2571
2572                mpt->mtt = mtt;
2573        }
2574
2575        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2576        if (err)
2577                goto ex_put;
2578
2579        if (!phys) {
2580                atomic_inc(&mtt->ref_count);
2581                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2582        }
2583
2584        res_end_move(dev, slave, RES_MPT, id);
2585        return 0;
2586
2587ex_put:
2588        if (!phys)
2589                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2590ex_abort:
2591        res_abort_move(dev, slave, RES_MPT, id);
2592
2593        return err;
2594}
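
/* Editor's note on the PD ownership check above: a PD allocated for a
 * VF carries (slave + 1) in bits 17..23 of the PD number, so for
 * slave 3 a valid MPT has pd_slave == 4; pd_slave == 0 denotes a PD
 * with no slave encoding and is accepted as well.
 */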
2595
2596int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2597                           struct mlx4_vhcr *vhcr,
2598                           struct mlx4_cmd_mailbox *inbox,
2599                           struct mlx4_cmd_mailbox *outbox,
2600                           struct mlx4_cmd_info *cmd)
2601{
2602        int err;
2603        int index = vhcr->in_modifier;
2604        struct res_mpt *mpt;
2605        int id;
2606
2607        id = index & mpt_mask(dev);
2608        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2609        if (err)
2610                return err;
2611
2612        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2613        if (err)
2614                goto ex_abort;
2615
2616        if (mpt->mtt)
2617                atomic_dec(&mpt->mtt->ref_count);
2618
2619        res_end_move(dev, slave, RES_MPT, id);
2620        return 0;
2621
2622ex_abort:
2623        res_abort_move(dev, slave, RES_MPT, id);
2624
2625        return err;
2626}
2627
2628int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2629                           struct mlx4_vhcr *vhcr,
2630                           struct mlx4_cmd_mailbox *inbox,
2631                           struct mlx4_cmd_mailbox *outbox,
2632                           struct mlx4_cmd_info *cmd)
2633{
2634        int err;
2635        int index = vhcr->in_modifier;
2636        struct res_mpt *mpt;
2637        int id;
2638
2639        id = index & mpt_mask(dev);
2640        err = get_res(dev, slave, id, RES_MPT, &mpt);
2641        if (err)
2642                return err;
2643
2644        if (mpt->com.from_state == RES_MPT_MAPPED) {
2645                /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2646                 * that, the VF must read the MPT. But since the MPT entry memory is not
2647                 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2648                 * entry contents. To guarantee that the MPT cannot be changed, the driver
2649                 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2650                 * ownership following the change. The change here allows the VF to
2651                 * perform QUERY_MPT also when the entry is in SW ownership.
2652                 */
2653                struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2654                                        &mlx4_priv(dev)->mr_table.dmpt_table,
2655                                        mpt->key, NULL);
2656
2657                if (NULL == mpt_entry || NULL == outbox->buf) {
2658                        err = -EINVAL;
2659                        goto out;
2660                }
2661
2662                memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2663
2664                err = 0;
2665        } else if (mpt->com.from_state == RES_MPT_HW) {
2666                err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2667        } else {
2668                err = -EBUSY;
2669                goto out;
2670        }
2671
2673out:
2674        put_res(dev, slave, id, RES_MPT);
2675        return err;
2676}
2677
2678static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2679{
2680        return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2681}
2682
2683static int qp_get_scqn(struct mlx4_qp_context *qpc)
2684{
2685        return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2686}
2687
2688static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2689{
2690        return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2691}
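
/* Editor's note: the srqn field packs the SRQ number in bits 0..23 and
 * a "QP is attached to an SRQ" flag in bit 24, which is why
 * mlx4_RST2INIT_QP_wrapper() below splits it as
 * qp_get_srqn(qpc) & 0xffffff and (qp_get_srqn(qpc) >> 24) & 1.
 */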
2692
2693static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2694                                  struct mlx4_qp_context *context)
2695{
2696        u32 qpn = vhcr->in_modifier & 0xffffff;
2697        u32 qkey = 0;
2698
2699        if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2700                return;
2701
2702        /* adjust qkey in qp context */
2703        context->qkey = cpu_to_be32(qkey);
2704}
2705
2706int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2707                             struct mlx4_vhcr *vhcr,
2708                             struct mlx4_cmd_mailbox *inbox,
2709                             struct mlx4_cmd_mailbox *outbox,
2710                             struct mlx4_cmd_info *cmd)
2711{
2712        int err;
2713        int qpn = vhcr->in_modifier & 0x7fffff;
2714        struct res_mtt *mtt;
2715        struct res_qp *qp;
2716        struct mlx4_qp_context *qpc = inbox->buf + 8;
2717        int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2718        int mtt_size = qp_get_mtt_size(qpc);
2719        struct res_cq *rcq;
2720        struct res_cq *scq;
2721        int rcqn = qp_get_rcqn(qpc);
2722        int scqn = qp_get_scqn(qpc);
2723        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2724        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2725        struct res_srq *srq;
2726        int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2727
2728        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2729        if (err)
2730                return err;
2731        qp->local_qpn = local_qpn;
2732        qp->sched_queue = 0;
2733        qp->param3 = 0;
2734        qp->vlan_control = 0;
2735        qp->fvl_rx = 0;
2736        qp->pri_path_fl = 0;
2737        qp->vlan_index = 0;
2738        qp->feup = 0;
2739        qp->qpc_flags = be32_to_cpu(qpc->flags);
2740
2741        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2742        if (err)
2743                goto ex_abort;
2744
2745        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2746        if (err)
2747                goto ex_put_mtt;
2748
2749        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2750        if (err)
2751                goto ex_put_mtt;
2752
2753        if (scqn != rcqn) {
2754                err = get_res(dev, slave, scqn, RES_CQ, &scq);
2755                if (err)
2756                        goto ex_put_rcq;
2757        } else
2758                scq = rcq;
2759
2760        if (use_srq) {
2761                err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2762                if (err)
2763                        goto ex_put_scq;
2764        }
2765
2766        adjust_proxy_tun_qkey(dev, vhcr, qpc);
2767        update_pkey_index(dev, slave, inbox);
2768        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2769        if (err)
2770                goto ex_put_srq;
2771        atomic_inc(&mtt->ref_count);
2772        qp->mtt = mtt;
2773        atomic_inc(&rcq->ref_count);
2774        qp->rcq = rcq;
2775        atomic_inc(&scq->ref_count);
2776        qp->scq = scq;
2777
2778        if (scqn != rcqn)
2779                put_res(dev, slave, scqn, RES_CQ);
2780
2781        if (use_srq) {
2782                atomic_inc(&srq->ref_count);
2783                put_res(dev, slave, srqn, RES_SRQ);
2784                qp->srq = srq;
2785        }
2786        put_res(dev, slave, rcqn, RES_CQ);
2787        put_res(dev, slave, mtt_base, RES_MTT);
2788        res_end_move(dev, slave, RES_QP, qpn);
2789
2790        return 0;
2791
2792ex_put_srq:
2793        if (use_srq)
2794                put_res(dev, slave, srqn, RES_SRQ);
2795ex_put_scq:
2796        if (scqn != rcqn)
2797                put_res(dev, slave, scqn, RES_CQ);
2798ex_put_rcq:
2799        put_res(dev, slave, rcqn, RES_CQ);
2800ex_put_mtt:
2801        put_res(dev, slave, mtt_base, RES_MTT);
2802ex_abort:
2803        res_abort_move(dev, slave, RES_QP, qpn);
2804
2805        return err;
2806}
2807
2808static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2809{
2810        return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2811}
2812
2813static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2814{
2815        int log_eq_size = eqc->log_eq_size & 0x1f;
2816        int page_shift = (eqc->log_page_size & 0x3f) + 12;
2817
2818        if (log_eq_size + 5 < page_shift)
2819                return 1;
2820
2821        return 1 << (log_eq_size + 5 - page_shift);
2822}
2823
2824static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2825{
2826        return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2827}
2828
2829static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2830{
2831        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2832        int page_shift = (cqc->log_page_size & 0x3f) + 12;
2833
2834        if (log_cq_size + 5 < page_shift)
2835                return 1;
2836
2837        return 1 << (log_cq_size + 5 - page_shift);
2838}
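
/* Editor's note: both helpers above follow the same arithmetic: EQEs
 * and CQEs are 32 bytes each here, so a queue of 2^log_size entries
 * spans 2^(log_size + 5) bytes, and dividing by the page size gives
 * the MTT count, clamped to at least one page.  E.g. log_eq_size = 10
 * with 4KB pages (page_shift = 12) yields 1 << (10 + 5 - 12) = 8
 * entries.
 */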
2839
2840int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2841                          struct mlx4_vhcr *vhcr,
2842                          struct mlx4_cmd_mailbox *inbox,
2843                          struct mlx4_cmd_mailbox *outbox,
2844                          struct mlx4_cmd_info *cmd)
2845{
2846        int err;
2847        int eqn = vhcr->in_modifier;
2848        int res_id = (slave << 10) | eqn;
2849        struct mlx4_eq_context *eqc = inbox->buf;
2850        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2851        int mtt_size = eq_get_mtt_size(eqc);
2852        struct res_eq *eq;
2853        struct res_mtt *mtt;
2854
2855        err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2856        if (err)
2857                return err;
2858        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2859        if (err)
2860                goto out_add;
2861
2862        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2863        if (err)
2864                goto out_move;
2865
2866        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2867        if (err)
2868                goto out_put;
2869
2870        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2871        if (err)
2872                goto out_put;
2873
2874        atomic_inc(&mtt->ref_count);
2875        eq->mtt = mtt;
2876        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2877        res_end_move(dev, slave, RES_EQ, res_id);
2878        return 0;
2879
2880out_put:
2881        put_res(dev, slave, mtt->com.res_id, RES_MTT);
2882out_move:
2883        res_abort_move(dev, slave, RES_EQ, res_id);
2884out_add:
2885        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2886        return err;
2887}
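
/* Editor's note: unlike QPs, CQs and SRQs, EQ numbers are only unique
 * within a slave, so the tracker key packs both values:
 * res_id = (slave << 10) | eqn, leaving room for 1024 EQs per slave.
 * mlx4_HW2SW_EQ_wrapper() and mlx4_GEN_EQE() below rebuild the same
 * key before looking the EQ up.
 */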
2888
2889int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
2890                            struct mlx4_vhcr *vhcr,
2891                            struct mlx4_cmd_mailbox *inbox,
2892                            struct mlx4_cmd_mailbox *outbox,
2893                            struct mlx4_cmd_info *cmd)
2894{
2895        int err;
2896        u8 get = vhcr->op_modifier;
2897
2898        if (get != 1)
2899                return -EPERM;
2900
2901        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2902
2903        return err;
2904}
2905
2906static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2907                              int len, struct res_mtt **res)
2908{
2909        struct mlx4_priv *priv = mlx4_priv(dev);
2910        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2911        struct res_mtt *mtt;
2912        int err = -EINVAL;
2913
2914        spin_lock_irq(mlx4_tlock(dev));
2915        list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2916                            com.list) {
2917                if (!check_mtt_range(dev, slave, start, len, mtt)) {
2918                        *res = mtt;
2919                        mtt->com.from_state = mtt->com.state;
2920                        mtt->com.state = RES_MTT_BUSY;
2921                        err = 0;
2922                        break;
2923                }
2924        }
2925        spin_unlock_irq(mlx4_tlock(dev));
2926
2927        return err;
2928}
2929
2930static int verify_qp_parameters(struct mlx4_dev *dev,
2931                                struct mlx4_vhcr *vhcr,
2932                                struct mlx4_cmd_mailbox *inbox,
2933                                enum qp_transition transition, u8 slave)
2934{
2935        u32                     qp_type;
2936        u32                     qpn;
2937        struct mlx4_qp_context  *qp_ctx;
2938        enum mlx4_qp_optpar     optpar;
2939        int port;
2940        int num_gids;
2941
2942        qp_ctx  = inbox->buf + 8;
2943        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2944        optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2945
2946        if (slave != mlx4_master_func_num(dev)) {
2947                qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
2948                /* setting QP rate-limit is disallowed for VFs */
2949                if (qp_ctx->rate_limit_params)
2950                        return -EPERM;
2951        }
2952
2953        switch (qp_type) {
2954        case MLX4_QP_ST_RC:
2955        case MLX4_QP_ST_XRC:
2956        case MLX4_QP_ST_UC:
2957                switch (transition) {
2958                case QP_TRANS_INIT2RTR:
2959                case QP_TRANS_RTR2RTS:
2960                case QP_TRANS_RTS2RTS:
2961                case QP_TRANS_SQD2SQD:
2962                case QP_TRANS_SQD2RTS:
2963                        if (slave != mlx4_master_func_num(dev)) {
2964                                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2965                                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2966                                        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2967                                                num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2968                                        else
2969                                                num_gids = 1;
2970                                        if (qp_ctx->pri_path.mgid_index >= num_gids)
2971                                                return -EINVAL;
2972                                }
2973                                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2974                                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2975                                        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2976                                                num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2977                                        else
2978                                                num_gids = 1;
2979                                        if (qp_ctx->alt_path.mgid_index >= num_gids)
2980                                                return -EINVAL;
2981                                }
                        }
2982                        break;
2983                default:
2984                        break;
2985                }
2986                break;
2987
2988        case MLX4_QP_ST_MLX:
2989                qpn = vhcr->in_modifier & 0x7fffff;
2990                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2991                if (transition == QP_TRANS_INIT2RTR &&
2992                    slave != mlx4_master_func_num(dev) &&
2993                    mlx4_is_qp_reserved(dev, qpn) &&
2994                    !mlx4_vf_smi_enabled(dev, slave, port)) {
2995                        /* only enabled VFs may create MLX proxy QPs */
2996                        mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
2997                                 __func__, slave, port);
2998                        return -EPERM;
2999                }
3000                break;
3001
3002        default:
3003                break;
3004        }
3005
3006        return 0;
3007}
3008
3009int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3010                           struct mlx4_vhcr *vhcr,
3011                           struct mlx4_cmd_mailbox *inbox,
3012                           struct mlx4_cmd_mailbox *outbox,
3013                           struct mlx4_cmd_info *cmd)
3014{
3015        struct mlx4_mtt mtt;
3016        __be64 *page_list = inbox->buf;
3017        u64 *pg_list = (u64 *)page_list;
3018        int i;
3019        struct res_mtt *rmtt = NULL;
3020        int start = be64_to_cpu(page_list[0]);
3021        int npages = vhcr->in_modifier;
3022        int err;
3023
3024        err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3025        if (err)
3026                return err;
3027
3028        /* Call the SW implementation of write_mtt:
3029         * - Prepare a dummy mtt struct
3030         * - Translate inbox contents to simple addresses in host endianness */
3031        mtt.offset = 0;  /* TBD: offset handling is broken, but the
3032                          * offset is not actually used here, so 0 is safe */
3033        mtt.order = 0;
3034        mtt.page_shift = 0;
3035        for (i = 0; i < npages; ++i)
3036                pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3037
3038        err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3039                               ((u64 *)page_list + 2));
3040
3041        if (rmtt)
3042                put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3043
3044        return err;
3045}
3046
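/* HW2SW_EQ wrapper: return a slave's EQ to SW ownership.  The EQ resource
 * id encodes the owner (slave << 10 | eqn); on success the reference taken
 * on the EQ's MTT is dropped and the EQ is removed from the resource
 * tracker.
 */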
3047int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3048                          struct mlx4_vhcr *vhcr,
3049                          struct mlx4_cmd_mailbox *inbox,
3050                          struct mlx4_cmd_mailbox *outbox,
3051                          struct mlx4_cmd_info *cmd)
3052{
3053        int eqn = vhcr->in_modifier;
3054        int res_id = eqn | (slave << 10);
3055        struct res_eq *eq;
3056        int err;
3057
3058        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3059        if (err)
3060                return err;
3061
3062        err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3063        if (err)
3064                goto ex_abort;
3065
3066        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3067        if (err)
3068                goto ex_put;
3069
3070        atomic_dec(&eq->mtt->ref_count);
3071        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3072        res_end_move(dev, slave, RES_EQ, res_id);
3073        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3074
3075        return 0;
3076
3077ex_put:
3078        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3079ex_abort:
3080        res_abort_move(dev, slave, RES_EQ, res_id);
3081
3082        return err;
3083}
3084
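/* Post an asynchronous event to a slave.  The event is delivered only if
 * the slave index is valid, the slave is active and is not the PF itself,
 * and it has registered an EQ for this event type.  Delivery is serialized
 * per slave by gen_eqe_mutex; for command-interface events a fresh token
 * is stamped into the EQE before it is posted with the GEN_EQE firmware
 * command.
 */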
3085int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3086{
3087        struct mlx4_priv *priv = mlx4_priv(dev);
3088        struct mlx4_slave_event_eq_info *event_eq;
3089        struct mlx4_cmd_mailbox *mailbox;
3090        u32 in_modifier = 0;
3091        int err;
3092        int res_id;
3093        struct res_eq *req;
3094
3095        if (!priv->mfunc.master.slave_state)
3096                return -EINVAL;
3097
3098        /* check that the slave id is valid, is not the PF, and is active */
3099        if (slave < 0 || slave > dev->persist->num_vfs ||
3100            slave == dev->caps.function ||
3101            !priv->mfunc.master.slave_state[slave].active)
3102                return 0;
3103
3104        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3105
3106        /* Create the event only if the slave is registered */
3107        if (event_eq->eqn < 0)
3108                return 0;
3109
3110        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3111        res_id = (slave << 10) | event_eq->eqn;
3112        err = get_res(dev, slave, res_id, RES_EQ, &req);
3113        if (err)
3114                goto unlock;
3115
3116        if (req->com.from_state != RES_EQ_HW) {
3117                err = -EINVAL;
3118                goto put;
3119        }
3120
3121        mailbox = mlx4_alloc_cmd_mailbox(dev);
3122        if (IS_ERR(mailbox)) {
3123                err = PTR_ERR(mailbox);
3124                goto put;
3125        }
3126
3127        if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3128                ++event_eq->token;
3129                eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3130        }
3131
3132        memcpy(mailbox->buf, (u8 *) eqe, 28);
3133
3134        in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3135
3136        err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3137                       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3138                       MLX4_CMD_NATIVE);
3139
3140        put_res(dev, slave, res_id, RES_EQ);
3141        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3142        mlx4_free_cmd_mailbox(dev, mailbox);
3143        return err;
3144
3145put:
3146        put_res(dev, slave, res_id, RES_EQ);
3147
3148unlock:
3149        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3150        return err;
3151}
3152
3153int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3154                          struct mlx4_vhcr *vhcr,
3155                          struct mlx4_cmd_mailbox *inbox,
3156                          struct mlx4_cmd_mailbox *outbox,
3157                          struct mlx4_cmd_info *cmd)
3158{
3159        int eqn = vhcr->in_modifier;
3160        int res_id = eqn | (slave << 10);
3161        struct res_eq *eq;
3162        int err;
3163
3164        err = get_res(dev, slave, res_id, RES_EQ, &eq);
3165        if (err)
3166                return err;
3167
3168        if (eq->com.from_state != RES_EQ_HW) {
3169                err = -EINVAL;
3170                goto ex_put;
3171        }
3172
3173        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3174
3175ex_put:
3176        put_res(dev, slave, res_id, RES_EQ);
3177        return err;
3178}
3179
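/* SW2HW_CQ wrapper: move a slave's CQ into HW ownership.  The MTT referred
 * to by the CQ context must belong to the slave and be large enough for
 * the CQ; on success the CQ takes a reference on that MTT so it cannot be
 * freed while the CQ is in HW.
 */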
3180int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3181                          struct mlx4_vhcr *vhcr,
3182                          struct mlx4_cmd_mailbox *inbox,
3183                          struct mlx4_cmd_mailbox *outbox,
3184                          struct mlx4_cmd_info *cmd)
3185{
3186        int err;
3187        int cqn = vhcr->in_modifier;
3188        struct mlx4_cq_context *cqc = inbox->buf;
3189        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3190        struct res_cq *cq = NULL;
3191        struct res_mtt *mtt;
3192
3193        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3194        if (err)
3195                return err;
3196        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3197        if (err)
3198                goto out_move;
3199        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3200        if (err)
3201                goto out_put;
3202        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3203        if (err)
3204                goto out_put;
3205        atomic_inc(&mtt->ref_count);
3206        cq->mtt = mtt;
3207        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3208        res_end_move(dev, slave, RES_CQ, cqn);
3209        return 0;
3210
3211out_put:
3212        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3213out_move:
3214        res_abort_move(dev, slave, RES_CQ, cqn);
3215        return err;
3216}
3217
3218int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3219                          struct mlx4_vhcr *vhcr,
3220                          struct mlx4_cmd_mailbox *inbox,
3221                          struct mlx4_cmd_mailbox *outbox,
3222                          struct mlx4_cmd_info *cmd)
3223{
3224        int err;
3225        int cqn = vhcr->in_modifier;
3226        struct res_cq *cq = NULL;
3227
3228        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3229        if (err)
3230                return err;
3231        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3232        if (err)
3233                goto out_move;
3234        atomic_dec(&cq->mtt->ref_count);
3235        res_end_move(dev, slave, RES_CQ, cqn);
3236        return 0;
3237
3238out_move:
3239        res_abort_move(dev, slave, RES_CQ, cqn);
3240        return err;
3241}
3242
3243int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3244                          struct mlx4_vhcr *vhcr,
3245                          struct mlx4_cmd_mailbox *inbox,
3246                          struct mlx4_cmd_mailbox *outbox,
3247                          struct mlx4_cmd_info *cmd)
3248{
3249        int cqn = vhcr->in_modifier;
3250        struct res_cq *cq;
3251        int err;
3252
3253        err = get_res(dev, slave, cqn, RES_CQ, &cq);
3254        if (err)
3255                return err;
3256
3257        if (cq->com.from_state != RES_CQ_HW) {
                    err = -EBUSY;
3258                goto ex_put;
            }
3259
3260        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3261ex_put:
3262        put_res(dev, slave, cqn, RES_CQ);
3263
3264        return err;
3265}
3266
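/* Handle CQ resize (MODIFY_CQ with op_modifier == 0): check that the MTT
 * currently referenced by the CQ matches the tracker's view, validate the
 * new MTT range from the mailbox, and on success move the CQ's MTT
 * reference from the old resource to the new one.
 */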
3267static int handle_resize(struct mlx4_dev *dev, int slave,
3268                         struct mlx4_vhcr *vhcr,
3269                         struct mlx4_cmd_mailbox *inbox,
3270                         struct mlx4_cmd_mailbox *outbox,
3271                         struct mlx4_cmd_info *cmd,
3272                         struct res_cq *cq)
3273{
3274        int err;
3275        struct res_mtt *orig_mtt;
3276        struct res_mtt *mtt;
3277        struct mlx4_cq_context *cqc = inbox->buf;
3278        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3279
3280        err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3281        if (err)
3282                return err;
3283
3284        if (orig_mtt != cq->mtt) {
3285                err = -EINVAL;
3286                goto ex_put;
3287        }
3288
3289        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3290        if (err)
3291                goto ex_put;
3292
3293        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3294        if (err)
3295                goto ex_put1;
3296        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3297        if (err)
3298                goto ex_put1;
3299        atomic_dec(&orig_mtt->ref_count);
3300        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3301        atomic_inc(&mtt->ref_count);
3302        cq->mtt = mtt;
3303        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3304        return 0;
3305
3306ex_put1:
3307        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3308ex_put:
3309        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3310
3311        return err;
3313}
3314
3315int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3316                           struct mlx4_vhcr *vhcr,
3317                           struct mlx4_cmd_mailbox *inbox,
3318                           struct mlx4_cmd_mailbox *outbox,
3319                           struct mlx4_cmd_info *cmd)
3320{
3321        int cqn = vhcr->in_modifier;
3322        struct res_cq *cq;
3323        int err;
3324
3325        err = get_res(dev, slave, cqn, RES_CQ, &cq);
3326        if (err)
3327                return err;
3328
3329        if (cq->com.from_state != RES_CQ_HW) {
                    err = -EBUSY;
3330                goto ex_put;
            }
3331
3332        if (vhcr->op_modifier == 0) {
3333                err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3334                goto ex_put;
3335        }
3336
3337        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3338ex_put:
3339        put_res(dev, slave, cqn, RES_CQ);
3340
3341        return err;
3342}
3343
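/* Number of MTT entries spanned by the SRQ buffer.  The buffer size is
 * num_entries * stride = 1 << (log_srq_size + log_rq_stride + 4) bytes
 * (the WQE stride is 16 << log_rq_stride), so the entry count is that
 * size shifted down by the page shift, with a minimum of one page.
 * For example, log_srq_size = 10, log_rq_stride = 2 and 4KB pages give
 * 1024 entries * 64 bytes = 64KB, i.e. 16 MTT entries.
 */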
3344static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3345{
3346        int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3347        int log_rq_stride = srqc->logstride & 7;
3348        int page_shift = (srqc->log_page_size & 0x3f) + 12;
3349
3350        if (log_srq_size + log_rq_stride + 4 < page_shift)
3351                return 1;
3352
3353        return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3354}
3355
3356int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3357                           struct mlx4_vhcr *vhcr,
3358                           struct mlx4_cmd_mailbox *inbox,
3359                           struct mlx4_cmd_mailbox *outbox,
3360                           struct mlx4_cmd_info *cmd)
3361{
3362        int err;
3363        int srqn = vhcr->in_modifier;
3364        struct res_mtt *mtt;
3365        struct res_srq *srq = NULL;
3366        struct mlx4_srq_context *srqc = inbox->buf;
3367        int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3368
3369        if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3370                return -EINVAL;
3371
3372        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3373        if (err)
3374                return err;
3375        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3376        if (err)
3377                goto ex_abort;
3378        err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3379                              mtt);
3380        if (err)
3381                goto ex_put_mtt;
3382
3383        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3384        if (err)
3385                goto ex_put_mtt;
3386
3387        atomic_inc(&mtt->ref_count);
3388        srq->mtt = mtt;
3389        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3390        res_end_move(dev, slave, RES_SRQ, srqn);
3391        return 0;
3392
3393ex_put_mtt:
3394        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3395ex_abort:
3396        res_abort_move(dev, slave, RES_SRQ, srqn);
3397
3398        return err;
3399}
3400
3401int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3402                           struct mlx4_vhcr *vhcr,
3403                           struct mlx4_cmd_mailbox *inbox,
3404                           struct mlx4_cmd_mailbox *outbox,
3405                           struct mlx4_cmd_info *cmd)
3406{
3407        int err;
3408        int srqn = vhcr->in_modifier;
3409        struct res_srq *srq = NULL;
3410
3411        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3412        if (err)
3413                return err;
3414        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3415        if (err)
3416                goto ex_abort;
3417        atomic_dec(&srq->mtt->ref_count);
3418        if (srq->cq)
3419                atomic_dec(&srq->cq->ref_count);
3420        res_end_move(dev, slave, RES_SRQ, srqn);
3421
3422        return 0;
3423
3424ex_abort:
3425        res_abort_move(dev, slave, RES_SRQ, srqn);
3426
3427        return err;
3428}
3429
3430int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3431                           struct mlx4_vhcr *vhcr,
3432                           struct mlx4_cmd_mailbox *inbox,
3433                           struct mlx4_cmd_mailbox *outbox,
3434                           struct mlx4_cmd_info *cmd)
3435{
3436        int err;
3437        int srqn = vhcr->in_modifier;
3438        struct res_srq *srq;
3439
3440        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3441        if (err)
3442                return err;
3443        if (srq->com.from_state != RES_SRQ_HW) {
3444                err = -EBUSY;
3445                goto out;
3446        }
3447        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3448out:
3449        put_res(dev, slave, srqn, RES_SRQ);
3450        return err;
3451}
3452
3453int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3454                         struct mlx4_vhcr *vhcr,
3455                         struct mlx4_cmd_mailbox *inbox,
3456                         struct mlx4_cmd_mailbox *outbox,
3457                         struct mlx4_cmd_info *cmd)
3458{
3459        int err;
3460        int srqn = vhcr->in_modifier;
3461        struct res_srq *srq;
3462
3463        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3464        if (err)
3465                return err;
3466
3467        if (srq->com.from_state != RES_SRQ_HW) {
3468                err = -EBUSY;
3469                goto out;
3470        }
3471
3472        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3473out:
3474        put_res(dev, slave, srqn, RES_SRQ);
3475        return err;
3476}
3477
3478int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3479                        struct mlx4_vhcr *vhcr,
3480                        struct mlx4_cmd_mailbox *inbox,
3481                        struct mlx4_cmd_mailbox *outbox,
3482                        struct mlx4_cmd_info *cmd)
3483{
3484        int err;
3485        int qpn = vhcr->in_modifier & 0x7fffff;
3486        struct res_qp *qp;
3487
3488        err = get_res(dev, slave, qpn, RES_QP, &qp);
3489        if (err)
3490                return err;
3491        if (qp->com.from_state != RES_QP_HW) {
3492                err = -EBUSY;
3493                goto out;
3494        }
3495
3496        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3497out:
3498        put_res(dev, slave, qpn, RES_QP);
3499        return err;
3500}
3501
3502int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3503                              struct mlx4_vhcr *vhcr,
3504                              struct mlx4_cmd_mailbox *inbox,
3505                              struct mlx4_cmd_mailbox *outbox,
3506                              struct mlx4_cmd_info *cmd)
3507{
3508        struct mlx4_qp_context *context = inbox->buf + 8;

3509        adjust_proxy_tun_qkey(dev, vhcr, context);
3510        update_pkey_index(dev, slave, inbox);
3511        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3512}
3513
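/* Bit 6 of sched_queue selects the physical port.  Rewrite it so the port
 * the slave addressed is translated to the real physical port: for the
 * primary path when it is being modified (or always on Ethernet), and for
 * the alternate path when it is being modified.
 */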
3514static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3515                                  struct mlx4_qp_context *qpc,
3516                                  struct mlx4_cmd_mailbox *inbox)
3517{
3518        enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3519        u8 pri_sched_queue;
3520        int port = mlx4_slave_convert_port(
3521                   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3522
3523        if (port < 0)
3524                return -EINVAL;
3525
3526        pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3527                          ((port & 1) << 6);
3528
3529        if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3530            mlx4_is_eth(dev, port + 1)) {
3531                qpc->pri_path.sched_queue = pri_sched_queue;
3532        }
3533
3534        if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3535                port = mlx4_slave_convert_port(
3536                                dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3537                                + 1) - 1;
3538                if (port < 0)
3539                        return -EINVAL;
3540                qpc->alt_path.sched_queue =
3541                        (qpc->alt_path.sched_queue & ~(1 << 6)) |
3542                        (port & 1) << 6;
3543        }
3544        return 0;
3545}
3546
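/* For Ethernet (RoCE) QPs other than the MLX service type, verify that
 * the source MAC index in the QP context resolves to a MAC that is
 * actually registered to this slave on that port.
 */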
3547static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3548                                struct mlx4_qp_context *qpc,
3549                                struct mlx4_cmd_mailbox *inbox)
3550{
3551        u64 mac;
3552        int port;
3553        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3554        u8 sched = *(u8 *)(inbox->buf + 64);
3555        u8 smac_ix;
3556
3557        port = (sched >> 6 & 1) + 1;
3558        if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3559                smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3560                if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3561                        return -ENOENT;
3562        }
3563        return 0;
3564}
3565
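/* INIT2RTR is where most paravirtualization of the QP context happens:
 * the sched_queue port bit, pkey index, GID index and proxy/tunnel qkey
 * are adjusted, the RoCE source MAC is verified, and the vport policy
 * (e.g. VST VLAN and QoS settings) is applied before the context is
 * handed to firmware.
 */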
3566int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3567                             struct mlx4_vhcr *vhcr,
3568                             struct mlx4_cmd_mailbox *inbox,
3569                             struct mlx4_cmd_mailbox *outbox,
3570                             struct mlx4_cmd_info *cmd)
3571{
3572        int err;
3573        struct mlx4_qp_context *qpc = inbox->buf + 8;
3574        int qpn = vhcr->in_modifier & 0x7fffff;
3575        struct res_qp *qp;
3576        u8 orig_sched_queue;
3577        __be32  orig_param3 = qpc->param3;
3578        u8 orig_vlan_control = qpc->pri_path.vlan_control;
3579        u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3580        u8 orig_pri_path_fl = qpc->pri_path.fl;
3581        u8 orig_vlan_index = qpc->pri_path.vlan_index;
3582        u8 orig_feup = qpc->pri_path.feup;
3583
3584        err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3585        if (err)
3586                return err;
3587        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3588        if (err)
3589                return err;
3590
3591        if (roce_verify_mac(dev, slave, qpc, inbox))
3592                return -EINVAL;
3593
3594        update_pkey_index(dev, slave, inbox);
3595        update_gid(dev, inbox, (u8)slave);
3596        adjust_proxy_tun_qkey(dev, vhcr, qpc);
3597        orig_sched_queue = qpc->pri_path.sched_queue;
3598        err = update_vport_qp_param(dev, inbox, slave, qpn);
3599        if (err)
3600                return err;
3601
3602        err = get_res(dev, slave, qpn, RES_QP, &qp);
3603        if (err)
3604                return err;
3605        if (qp->com.from_state != RES_QP_HW) {
3606                err = -EBUSY;
3607                goto out;
3608        }
3609
3610        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3611out:
3612        /* If there was no error, save the sched_queue value passed in by
3613         * the VF.  This is essentially the QoS value provided by the VF,
3614         * and will be useful if we allow dynamic changes from VST back to VGT.
3615         */
3616        if (!err) {
3617                qp->sched_queue = orig_sched_queue;
3618                qp->param3      = orig_param3;
3619                qp->vlan_control = orig_vlan_control;
3620                qp->fvl_rx      =  orig_fvl_rx;
3621                qp->pri_path_fl = orig_pri_path_fl;
3622                qp->vlan_index  = orig_vlan_index;
3623                qp->feup        = orig_feup;
3624        }
3625        put_res(dev, slave, qpn, RES_QP);
3626        return err;
3627}
3628
3629int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3630                            struct mlx4_vhcr *vhcr,
3631                            struct mlx4_cmd_mailbox *inbox,
3632                            struct mlx4_cmd_mailbox *outbox,
3633                            struct mlx4_cmd_info *cmd)
3634{
3635        int err;
3636        struct mlx4_qp_context *context = inbox->buf + 8;
3637
3638        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3639        if (err)
3640                return err;
3641        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3642        if (err)
3643                return err;
3644
3645        update_pkey_index(dev, slave, inbox);
3646        update_gid(dev, inbox, (u8)slave);
3647        adjust_proxy_tun_qkey(dev, vhcr, context);
3648        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3649}
3650
3651int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3652                            struct mlx4_vhcr *vhcr,
3653                            struct mlx4_cmd_mailbox *inbox,
3654                            struct mlx4_cmd_mailbox *outbox,
3655                            struct mlx4_cmd_info *cmd)
3656{
3657        int err;
3658        struct mlx4_qp_context *context = inbox->buf + 8;
3659
3660        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3661        if (err)
3662                return err;
3663        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3664        if (err)
3665                return err;
3666
3667        update_pkey_index(dev, slave, inbox);
3668        update_gid(dev, inbox, (u8)slave);
3669        adjust_proxy_tun_qkey(dev, vhcr, context);
3670        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3671}
3672
3674int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3675                              struct mlx4_vhcr *vhcr,
3676                              struct mlx4_cmd_mailbox *inbox,
3677                              struct mlx4_cmd_mailbox *outbox,
3678                              struct mlx4_cmd_info *cmd)
3679{
3680        struct mlx4_qp_context *context = inbox->buf + 8;
            int err;

3681        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3682        if (err)
3683                return err;
3684        adjust_proxy_tun_qkey(dev, vhcr, context);
3685        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3686}
3687
3688int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3689                            struct mlx4_vhcr *vhcr,
3690                            struct mlx4_cmd_mailbox *inbox,
3691                            struct mlx4_cmd_mailbox *outbox,
3692                            struct mlx4_cmd_info *cmd)
3693{
3694        int err;
3695        struct mlx4_qp_context *context = inbox->buf + 8;
3696
3697        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3698        if (err)
3699                return err;
3700        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3701        if (err)
3702                return err;
3703
3704        adjust_proxy_tun_qkey(dev, vhcr, context);
3705        update_gid(dev, inbox, (u8)slave);
3706        update_pkey_index(dev, slave, inbox);
3707        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3708}
3709
3710int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3711                            struct mlx4_vhcr *vhcr,
3712                            struct mlx4_cmd_mailbox *inbox,
3713                            struct mlx4_cmd_mailbox *outbox,
3714                            struct mlx4_cmd_info *cmd)
3715{
3716        int err;
3717        struct mlx4_qp_context *context = inbox->buf + 8;
3718
3719        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3720        if (err)
3721                return err;
3722        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3723        if (err)
3724                return err;
3725
3726        adjust_proxy_tun_qkey(dev, vhcr, context);
3727        update_gid(dev, inbox, (u8)slave);
3728        update_pkey_index(dev, slave, inbox);
3729        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3730}
3731
3732int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3733                         struct mlx4_vhcr *vhcr,
3734                         struct mlx4_cmd_mailbox *inbox,
3735                         struct mlx4_cmd_mailbox *outbox,
3736                         struct mlx4_cmd_info *cmd)
3737{
3738        int err;
3739        int qpn = vhcr->in_modifier & 0x7fffff;
3740        struct res_qp *qp;
3741
3742        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3743        if (err)
3744                return err;
3745        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3746        if (err)
3747                goto ex_abort;
3748
3749        atomic_dec(&qp->mtt->ref_count);
3750        atomic_dec(&qp->rcq->ref_count);
3751        atomic_dec(&qp->scq->ref_count);
3752        if (qp->srq)
3753                atomic_dec(&qp->srq->ref_count);
3754        res_end_move(dev, slave, RES_QP, qpn);
3755        return 0;
3756
3757ex_abort:
3758        res_abort_move(dev, slave, RES_QP, qpn);
3759
3760        return err;
3761}
3762
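/* Multicast attachments made by a slave are recorded per QP on mcg_list,
 * so that detach requests can be validated and so that detach_qp() can
 * force-clean them when the slave goes away.
 */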
3763static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3764                                struct res_qp *rqp, u8 *gid)
3765{
3766        struct res_gid *res;
3767
3768        list_for_each_entry(res, &rqp->mcg_list, list) {
3769                if (!memcmp(res->gid, gid, 16))
3770                        return res;
3771        }
3772        return NULL;
3773}
3774
3775static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3776                       u8 *gid, enum mlx4_protocol prot,
3777                       enum mlx4_steer_type steer, u64 reg_id)
3778{
3779        struct res_gid *res;
3780        int err;
3781
3782        res = kzalloc(sizeof *res, GFP_KERNEL);
3783        if (!res)
3784                return -ENOMEM;
3785
3786        spin_lock_irq(&rqp->mcg_spl);
3787        if (find_gid(dev, slave, rqp, gid)) {
3788                kfree(res);
3789                err = -EEXIST;
3790        } else {
3791                memcpy(res->gid, gid, 16);
3792                res->prot = prot;
3793                res->steer = steer;
3794                res->reg_id = reg_id;
3795                list_add_tail(&res->list, &rqp->mcg_list);
3796                err = 0;
3797        }
3798        spin_unlock_irq(&rqp->mcg_spl);
3799
3800        return err;
3801}
3802
3803static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3804                       u8 *gid, enum mlx4_protocol prot,
3805                       enum mlx4_steer_type steer, u64 *reg_id)
3806{
3807        struct res_gid *res;
3808        int err;
3809
3810        spin_lock_irq(&rqp->mcg_spl);
3811        res = find_gid(dev, slave, rqp, gid);
3812        if (!res || res->prot != prot || res->steer != steer) {
3813                err = -EINVAL;
3814        } else {
3815                *reg_id = res->reg_id;
3816                list_del(&res->list);
3817                kfree(res);
3818                err = 0;
3819        }
3820        spin_unlock_irq(&rqp->mcg_spl);
3821
3822        return err;
3823}
3824
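/* Attach a QP to a multicast/steering group on behalf of a slave.  In
 * device-managed flow steering mode the request is translated into a DMFS
 * rule; in B0 mode the port embedded in gid[5] is converted for Ethernet
 * before the common attach path is taken.
 */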
3825static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3826                     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3827                     enum mlx4_steer_type type, u64 *reg_id)
3828{
3829        switch (dev->caps.steering_mode) {
3830        case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3831                int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3832                if (port < 0)
3833                        return port;
3834                return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3835                                                block_loopback, prot,
3836                                                reg_id);
3837        }
3838        case MLX4_STEERING_MODE_B0:
3839                if (prot == MLX4_PROT_ETH) {
3840                        int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3841                        if (port < 0)
3842                                return port;
3843                        gid[5] = port;
3844                }
3845                return mlx4_qp_attach_common(dev, qp, gid,
3846                                            block_loopback, prot, type);
3847        default:
3848                return -EINVAL;
3849        }
3850}
3851
3852static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3853                     u8 gid[16], enum mlx4_protocol prot,
3854                     enum mlx4_steer_type type, u64 reg_id)
3855{
3856        switch (dev->caps.steering_mode) {
3857        case MLX4_STEERING_MODE_DEVICE_MANAGED:
3858                return mlx4_flow_detach(dev, reg_id);
3859        case MLX4_STEERING_MODE_B0:
3860                return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3861        default:
3862                return -EINVAL;
3863        }
3864}
3865
3866static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3867                            u8 *gid, enum mlx4_protocol prot)
3868{
3869        int real_port;
3870
3871        if (prot != MLX4_PROT_ETH)
3872                return 0;
3873
3874        if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3875            dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3876                real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3877                if (real_port < 0)
3878                        return -EINVAL;
3879                gid[5] = real_port;
3880        }
3881
3882        return 0;
3883}
3884
3885int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3886                               struct mlx4_vhcr *vhcr,
3887                               struct mlx4_cmd_mailbox *inbox,
3888                               struct mlx4_cmd_mailbox *outbox,
3889                               struct mlx4_cmd_info *cmd)
3890{
3891        struct mlx4_qp qp; /* dummy for calling attach/detach */
3892        u8 *gid = inbox->buf;
3893        enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3894        int err;
3895        int qpn;
3896        struct res_qp *rqp;
3897        u64 reg_id = 0;
3898        int attach = vhcr->op_modifier;
3899        int block_loopback = vhcr->in_modifier >> 31;
3900        u8 steer_type_mask = 2;
3901        enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3902
3903        qpn = vhcr->in_modifier & 0xffffff;
3904        err = get_res(dev, slave, qpn, RES_QP, &rqp);
3905        if (err)
3906                return err;
3907
3908        qp.qpn = qpn;
3909        if (attach) {
3910                err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3911                                type, &reg_id);
3912                if (err) {
3913                        pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3914                        goto ex_put;
3915                }
3916                err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3917                if (err)
3918                        goto ex_detach;
3919        } else {
3920                err = mlx4_adjust_port(dev, slave, gid, prot);
3921                if (err)
3922                        goto ex_put;
3923
3924                err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3925                if (err)
3926                        goto ex_put;
3927
3928                err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3929                if (err)
3930                        pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
3931                               qpn, reg_id);
3932        }
3933        put_res(dev, slave, qpn, RES_QP);
3934        return err;
3935
3936ex_detach:
3937        qp_detach(dev, &qp, gid, prot, type, reg_id);
3938ex_put:
3939        put_res(dev, slave, qpn, RES_QP);
3940        return err;
3941}
3942
3943/*
3944 * MAC validation for flow steering rules.
3945 * A VF may attach rules only with a MAC address that is assigned to it.
3946 */
3947static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3948                                   struct list_head *rlist)
3949{
3950        struct mac_res *res, *tmp;
3951        __be64 be_mac;
3952
3953        /* make sure it isn't a multicast or broadcast MAC */
3954        if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3955            !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3956                list_for_each_entry_safe(res, tmp, rlist, list) {
3957                        be_mac = cpu_to_be64(res->mac << 16);
3958                        if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3959                                return 0;
3960                }
3961                pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3962                       eth_header->eth.dst_mac, slave);
3963                return -EINVAL;
3964        }
3965        return 0;
3966}
3967
3968/*
3969 * If the rule lacks an eth header, prepend one with a MAC address
3970 * assigned to the VF.
3971 */
3972static int add_eth_header(struct mlx4_dev *dev, int slave,
3973                          struct mlx4_cmd_mailbox *inbox,
3974                          struct list_head *rlist, int header_id)
3975{
3976        struct mac_res *res, *tmp;
3977        u8 port;
3978        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3979        struct mlx4_net_trans_rule_hw_eth *eth_header;
3980        struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3981        struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3982        __be64 be_mac = 0;
3983        __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3984
3985        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3986        port = ctrl->port;
3987        eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3988
3989        /* Clear a space in the inbox for eth header */
3990        switch (header_id) {
3991        case MLX4_NET_TRANS_RULE_ID_IPV4:
3992                ip_header =
3993                        (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3994                memmove(ip_header, eth_header,
3995                        sizeof(*ip_header) + sizeof(*l4_header));
3996                break;
3997        case MLX4_NET_TRANS_RULE_ID_TCP:
3998        case MLX4_NET_TRANS_RULE_ID_UDP:
3999                l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4000                            (eth_header + 1);
4001                memmove(l4_header, eth_header, sizeof(*l4_header));
4002                break;
4003        default:
4004                return -EINVAL;
4005        }
4006        list_for_each_entry_safe(res, tmp, rlist, list) {
4007                if (port == res->port) {
4008                        be_mac = cpu_to_be64(res->mac << 16);
4009                        break;
4010                }
4011        }
4012        if (!be_mac) {
4013                pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
4014                       port);
4015                return -EINVAL;
4016        }
4017
4018        memset(eth_header, 0, sizeof(*eth_header));
4019        eth_header->size = sizeof(*eth_header) >> 2;
4020        eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4021        memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4022        memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4023
4024        return 0;
4026}
4027
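/* UPDATE_QP from a slave is restricted: only the MAC index of the primary
 * address path may be changed, and the new smac index must resolve to a
 * MAC registered to this slave on the QP's port.
 */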
4028#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4029int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4030                           struct mlx4_vhcr *vhcr,
4031                           struct mlx4_cmd_mailbox *inbox,
4032                           struct mlx4_cmd_mailbox *outbox,
4033                           struct mlx4_cmd_info *cmd_info)
4034{
4035        int err;
4036        u32 qpn = vhcr->in_modifier & 0xffffff;
4037        struct res_qp *rqp;
4038        u64 mac;
4039        unsigned port;
4040        u64 pri_addr_path_mask;
4041        struct mlx4_update_qp_context *cmd;
4042        int smac_index;
4043
4044        cmd = (struct mlx4_update_qp_context *)inbox->buf;
4045
4046        pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4047        if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4048            (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4049                return -EPERM;
4050
4051        /* Just change the smac for the QP */
4052        err = get_res(dev, slave, qpn, RES_QP, &rqp);
4053        if (err) {
4054                mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4055                return err;
4056        }
4057
4058        port = (rqp->sched_queue >> 6 & 1) + 1;
4059
4060        if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4061                smac_index = cmd->qp_context.pri_path.grh_mylmc;
4062                err = mac_find_smac_ix_in_slave(dev, slave, port,
4063                                                smac_index, &mac);
4064
4065                if (err) {
4066                        mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4067                                 qpn, smac_index);
4068                        goto err_mac;
4069                }
4070        }
4071
4072        err = mlx4_cmd(dev, inbox->dma,
4073                       vhcr->in_modifier, 0,
4074                       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4075                       MLX4_CMD_NATIVE);
4076        if (err)
4077                mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4080
4081err_mac:
4082        put_res(dev, slave, qpn, RES_QP);
4083        return err;
4084}
4085
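/* Attach a device-managed flow steering rule on behalf of a slave:
 * translate the port, check that the target QP belongs to the slave,
 * validate (or synthesize) the L2 header so a VF can only steer traffic
 * destined to its own MACs, and track the returned rule id as a
 * RES_FS_RULE tied to the QP.
 */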
4086int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4087                                         struct mlx4_vhcr *vhcr,
4088                                         struct mlx4_cmd_mailbox *inbox,
4089                                         struct mlx4_cmd_mailbox *outbox,
4090                                         struct mlx4_cmd_info *cmd)
4091{
4093        struct mlx4_priv *priv = mlx4_priv(dev);
4094        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4095        struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4096        int err;
4097        int qpn;
4098        struct res_qp *rqp;
4099        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4100        struct _rule_hw  *rule_header;
4101        int header_id;
4102
4103        if (dev->caps.steering_mode !=
4104            MLX4_STEERING_MODE_DEVICE_MANAGED)
4105                return -EOPNOTSUPP;
4106
4107        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4108        err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4109        if (err <= 0)
4110                return -EINVAL;
            ctrl->port = err;
4111        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4112        err = get_res(dev, slave, qpn, RES_QP, &rqp);
4113        if (err) {
4114                pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4115                return err;
4116        }
4117        rule_header = (struct _rule_hw *)(ctrl + 1);
4118        header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4119
4120        switch (header_id) {
4121        case MLX4_NET_TRANS_RULE_ID_ETH:
4122                if (validate_eth_header_mac(slave, rule_header, rlist)) {
4123                        err = -EINVAL;
4124                        goto err_put;
4125                }
4126                break;
4127        case MLX4_NET_TRANS_RULE_ID_IB:
4128                break;
4129        case MLX4_NET_TRANS_RULE_ID_IPV4:
4130        case MLX4_NET_TRANS_RULE_ID_TCP:
4131        case MLX4_NET_TRANS_RULE_ID_UDP:
4132                pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4133                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4134                        err = -EINVAL;
4135                        goto err_put;
4136                }
4137                vhcr->in_modifier +=
4138                        sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4139                break;
4140        default:
4141                pr_err("Corrupted mailbox\n");
4142                err = -EINVAL;
4143                goto err_put;
4144        }
4145
4146        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4147                           vhcr->in_modifier, 0,
4148                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4149                           MLX4_CMD_NATIVE);
4150        if (err)
4151                goto err_put;
4152
4153        err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4154        if (err) {
4155                mlx4_err(dev, "Failed to add flow steering resources\n");
4156                /* detach rule */
4157                mlx4_cmd(dev, vhcr->out_param, 0, 0,
4158                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4159                         MLX4_CMD_NATIVE);
4160                goto err_put;
4161        }
4162        atomic_inc(&rqp->ref_count);
4163err_put:
4164        put_res(dev, slave, qpn, RES_QP);
4165        return err;
4166}
4167
4168int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4169                                         struct mlx4_vhcr *vhcr,
4170                                         struct mlx4_cmd_mailbox *inbox,
4171                                         struct mlx4_cmd_mailbox *outbox,
4172                                         struct mlx4_cmd_info *cmd)
4173{
4174        int err;
4175        struct res_qp *rqp;
4176        struct res_fs_rule *rrule;
4177
4178        if (dev->caps.steering_mode !=
4179            MLX4_STEERING_MODE_DEVICE_MANAGED)
4180                return -EOPNOTSUPP;
4181
4182        err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4183        if (err)
4184                return err;
4185        /* Release the rule from busy state before removal */
4186        put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4187        err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4188        if (err)
4189                return err;
4190
4191        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4192        if (err) {
4193                mlx4_err(dev, "Failed to remove flow steering resources\n");
4194                goto out;
4195        }
4196
4197        err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4198                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4199                       MLX4_CMD_NATIVE);
4200        if (!err)
4201                atomic_dec(&rqp->ref_count);
4202out:
4203        put_res(dev, slave, rrule->qpn, RES_QP);
4204        return err;
4205}
4206
4207enum {
4208        BUSY_MAX_RETRIES = 10
4209};
4210
4211int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4212                               struct mlx4_vhcr *vhcr,
4213                               struct mlx4_cmd_mailbox *inbox,
4214                               struct mlx4_cmd_mailbox *outbox,
4215                               struct mlx4_cmd_info *cmd)
4216{
4217        int err;
4218        int index = vhcr->in_modifier & 0xffff;
4219
4220        err = get_res(dev, slave, index, RES_COUNTER, NULL);
4221        if (err)
4222                return err;
4223
4224        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4225        put_res(dev, slave, index, RES_COUNTER);
4226        return err;
4227}
4228
4229static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4230{
4231        struct res_gid *rgid;
4232        struct res_gid *tmp;
4233        struct mlx4_qp qp; /* dummy for calling attach/detach */
4234
4235        list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4236                switch (dev->caps.steering_mode) {
4237                case MLX4_STEERING_MODE_DEVICE_MANAGED:
4238                        mlx4_flow_detach(dev, rgid->reg_id);
4239                        break;
4240                case MLX4_STEERING_MODE_B0:
4241                        qp.qpn = rqp->local_qpn;
4242                        (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4243                                                     rgid->prot, rgid->steer);
4244                        break;
4245                }
4246                list_del(&rgid->list);
4247                kfree(rgid);
4248        }
4249}
4250
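/* Mark every resource of the given type owned by the slave as busy and
 * flag it for removal so no new state transitions can start.  Returns the
 * number of resources that were already busy and could not be claimed in
 * this pass.
 */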
4251static int _move_all_busy(struct mlx4_dev *dev, int slave,
4252                          enum mlx4_resource type, int print)
4253{
4254        struct mlx4_priv *priv = mlx4_priv(dev);
4255        struct mlx4_resource_tracker *tracker =
4256                &priv->mfunc.master.res_tracker;
4257        struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4258        struct res_common *r;
4259        struct res_common *tmp;
4260        int busy;
4261
4262        busy = 0;
4263        spin_lock_irq(mlx4_tlock(dev));
4264        list_for_each_entry_safe(r, tmp, rlist, list) {
4265                if (r->owner == slave) {
4266                        if (!r->removing) {
4267                                if (r->state == RES_ANY_BUSY) {
4268                                        if (print)
4269                                                mlx4_dbg(dev,
4270                                                         "%s id 0x%llx is busy\n",
4271                                                          resource_str(type),
4272                                                          r->res_id);
4273                                        ++busy;
4274                                } else {
4275                                        r->from_state = r->state;
4276                                        r->state = RES_ANY_BUSY;
4277                                        r->removing = 1;
4278                                }
4279                        }
4280                }
4281        }
4282        spin_unlock_irq(mlx4_tlock(dev));
4283
4284        return busy;
4285}
4286
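/* Retry _move_all_busy() for up to five seconds, rescheduling between
 * passes, and log whatever is still busy on the final attempt.
 */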
4287static int move_all_busy(struct mlx4_dev *dev, int slave,
4288                         enum mlx4_resource type)
4289{
4290        unsigned long begin;
4291        int busy;
4292
4293        begin = jiffies;
4294        do {
4295                busy = _move_all_busy(dev, slave, type, 0);
4296                if (time_after(jiffies, begin + 5 * HZ))
4297                        break;
4298                if (busy)
4299                        cond_resched();
4300        } while (busy);
4301
4302        if (busy)
4303                busy = _move_all_busy(dev, slave, type, 1);
4304
4305        return busy;
4306}
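
/* Tear down all QPs owned by a departing slave.  Each QP is walked down
 * its state ladder (RES_QP_HW -> RES_QP_MAPPED -> RES_QP_RESERVED ->
 * freed): move it to reset with 2RST_QP, free its ICM, release its range,
 * and drop the references it holds on its CQs, MTT and SRQ.
 */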
4307static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4308{
4309        struct mlx4_priv *priv = mlx4_priv(dev);
4310        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4311        struct list_head *qp_list =
4312                &tracker->slave_list[slave].res_list[RES_QP];
4313        struct res_qp *qp;
4314        struct res_qp *tmp;
4315        int state;
4316        u64 in_param;
4317        int qpn;
4318        int err;
4319
4320        err = move_all_busy(dev, slave, RES_QP);
4321        if (err)
4322                mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4323                          slave);
4324
4325        spin_lock_irq(mlx4_tlock(dev));
4326        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4327                spin_unlock_irq(mlx4_tlock(dev));
4328                if (qp->com.owner == slave) {
4329                        qpn = qp->com.res_id;
4330                        detach_qp(dev, slave, qp);
4331                        state = qp->com.from_state;
4332                        while (state != 0) {
4333                                switch (state) {
4334                                case RES_QP_RESERVED:
4335                                        spin_lock_irq(mlx4_tlock(dev));
4336                                        rb_erase(&qp->com.node,
4337                                                 &tracker->res_tree[RES_QP]);
4338                                        list_del(&qp->com.list);
4339                                        spin_unlock_irq(mlx4_tlock(dev));
4340                                        if (!valid_reserved(dev, slave, qpn)) {
4341                                                __mlx4_qp_release_range(dev, qpn, 1);
4342                                                mlx4_release_resource(dev, slave,
4343                                                                      RES_QP, 1, 0);
4344                                        }
4345                                        kfree(qp);
4346                                        state = 0;
4347                                        break;
4348                                case RES_QP_MAPPED:
4349                                        if (!valid_reserved(dev, slave, qpn))
4350                                                __mlx4_qp_free_icm(dev, qpn);
4351                                        state = RES_QP_RESERVED;
4352                                        break;
4353                                case RES_QP_HW:
4354                                        in_param = slave;
4355                                        err = mlx4_cmd(dev, in_param,
4356                                                       qp->local_qpn, 2,
4357                                                       MLX4_CMD_2RST_QP,
4358                                                       MLX4_CMD_TIME_CLASS_A,
4359                                                       MLX4_CMD_NATIVE);
4360                                        if (err)
4361                                                mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4362                                                         slave, qp->local_qpn);
4363                                        atomic_dec(&qp->rcq->ref_count);
4364                                        atomic_dec(&qp->scq->ref_count);
4365                                        atomic_dec(&qp->mtt->ref_count);
4366                                        if (qp->srq)
4367                                                atomic_dec(&qp->srq->ref_count);
4368                                        state = RES_QP_MAPPED;
4369                                        break;
4370                                default:
4371                                        state = 0;
4372                                }
4373                        }
4374                }
4375                spin_lock_irq(mlx4_tlock(dev));
4376        }
4377        spin_unlock_irq(mlx4_tlock(dev));
4378}
4379
4380static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4381{
4382        struct mlx4_priv *priv = mlx4_priv(dev);
4383        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4384        struct list_head *srq_list =
4385                &tracker->slave_list[slave].res_list[RES_SRQ];
4386        struct res_srq *srq;
4387        struct res_srq *tmp;
4388        int state;
4389        u64 in_param;
4391        int srqn;
4392        int err;
4393
4394        err = move_all_busy(dev, slave, RES_SRQ);
4395        if (err)
4396                mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4397                          slave);
4398
4399        spin_lock_irq(mlx4_tlock(dev));
4400        list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4401                spin_unlock_irq(mlx4_tlock(dev));
4402                if (srq->com.owner == slave) {
4403                        srqn = srq->com.res_id;
4404                        state = srq->com.from_state;
4405                        while (state != 0) {
4406                                switch (state) {
4407                                case RES_SRQ_ALLOCATED:
4408                                        __mlx4_srq_free_icm(dev, srqn);
4409                                        spin_lock_irq(mlx4_tlock(dev));
4410                                        rb_erase(&srq->com.node,
4411                                                 &tracker->res_tree[RES_SRQ]);
4412                                        list_del(&srq->com.list);
4413                                        spin_unlock_irq(mlx4_tlock(dev));
4414                                        mlx4_release_resource(dev, slave,
4415                                                              RES_SRQ, 1, 0);
4416                                        kfree(srq);
4417                                        state = 0;
4418                                        break;
4419
4420                                case RES_SRQ_HW:
4421                                        in_param = slave;
4422                                        err = mlx4_cmd(dev, in_param, srqn, 1,
4423                                                       MLX4_CMD_HW2SW_SRQ,
4424                                                       MLX4_CMD_TIME_CLASS_A,
4425                                                       MLX4_CMD_NATIVE);
4426                                        if (err)
4427                                                mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4428                                                         slave, srqn);
4429
4430                                        atomic_dec(&srq->mtt->ref_count);
4431                                        if (srq->cq)
4432                                                atomic_dec(&srq->cq->ref_count);
4433                                        state = RES_SRQ_ALLOCATED;
4434                                        break;
4435
4436                                default:
4437                                        state = 0;
4438                                }
4439                        }
4440                }
4441                spin_lock_irq(mlx4_tlock(dev));
4442        }
4443        spin_unlock_irq(mlx4_tlock(dev));
4444}
4445
4446static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4447{
4448        struct mlx4_priv *priv = mlx4_priv(dev);
4449        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4450        struct list_head *cq_list =
4451                &tracker->slave_list[slave].res_list[RES_CQ];
4452        struct res_cq *cq;
4453        struct res_cq *tmp;
4454        int state;
4455        u64 in_param;
4457        int cqn;
4458        int err;
4459
4460        err = move_all_busy(dev, slave, RES_CQ);
4461        if (err)
4462                mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4463                          slave);
4464
4465        spin_lock_irq(mlx4_tlock(dev));
4466        list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4467                spin_unlock_irq(mlx4_tlock(dev));
4468                if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4469                        cqn = cq->com.res_id;
4470                        state = cq->com.from_state;
4471                        while (state != 0) {
4472                                switch (state) {
4473                                case RES_CQ_ALLOCATED:
4474                                        __mlx4_cq_free_icm(dev, cqn);
4475                                        spin_lock_irq(mlx4_tlock(dev));
4476                                        rb_erase(&cq->com.node,
4477                                                 &tracker->res_tree[RES_CQ]);
4478                                        list_del(&cq->com.list);
4479                                        spin_unlock_irq(mlx4_tlock(dev));
4480                                        mlx4_release_resource(dev, slave,
4481                                                              RES_CQ, 1, 0);
4482                                        kfree(cq);
4483                                        state = 0;
4484                                        break;
4485
4486                                case RES_CQ_HW:
4487                                        in_param = slave;
4488                                        err = mlx4_cmd(dev, in_param, cqn, 1,
4489                                                       MLX4_CMD_HW2SW_CQ,
4490                                                       MLX4_CMD_TIME_CLASS_A,
4491                                                       MLX4_CMD_NATIVE);
4492                                        if (err)
4493                                                mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4494                                                         slave, cqn);
4495                                        atomic_dec(&cq->mtt->ref_count);
4496                                        state = RES_CQ_ALLOCATED;
4497                                        break;
4498
4499                                default:
4500                                        state = 0;
4501                                }
4502                        }
4503                }
4504                spin_lock_irq(mlx4_tlock(dev));
4505        }
4506        spin_unlock_irq(mlx4_tlock(dev));
4507}
4508
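    /*
     * Reclaim all MPTs (memory regions) still owned by @slave, unwinding
     * RES_MPT_HW -> RES_MPT_MAPPED -> RES_MPT_RESERVED -> freed: HW2SW_MPT
     * pulls the entry out of hardware, then its ICM is freed, then the key
     * is released.
     */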
4509static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4510{
4511        struct mlx4_priv *priv = mlx4_priv(dev);
4512        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4513        struct list_head *mpt_list =
4514                &tracker->slave_list[slave].res_list[RES_MPT];
4515        struct res_mpt *mpt;
4516        struct res_mpt *tmp;
4517        int state;
4518        u64 in_param;
4520        int mptn;
4521        int err;
4522
4523        err = move_all_busy(dev, slave, RES_MPT);
4524        if (err)
4525                mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4526                          slave);
4527
4528        spin_lock_irq(mlx4_tlock(dev));
4529        list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4530                spin_unlock_irq(mlx4_tlock(dev));
4531                if (mpt->com.owner == slave) {
4532                        mptn = mpt->com.res_id;
4533                        state = mpt->com.from_state;
4534                        while (state != 0) {
4535                                switch (state) {
4536                                case RES_MPT_RESERVED:
4537                                        __mlx4_mpt_release(dev, mpt->key);
4538                                        spin_lock_irq(mlx4_tlock(dev));
4539                                        rb_erase(&mpt->com.node,
4540                                                 &tracker->res_tree[RES_MPT]);
4541                                        list_del(&mpt->com.list);
4542                                        spin_unlock_irq(mlx4_tlock(dev));
4543                                        mlx4_release_resource(dev, slave,
4544                                                              RES_MPT, 1, 0);
4545                                        kfree(mpt);
4546                                        state = 0;
4547                                        break;
4548
4549                                case RES_MPT_MAPPED:
4550                                        __mlx4_mpt_free_icm(dev, mpt->key);
4551                                        state = RES_MPT_RESERVED;
4552                                        break;
4553
4554                                case RES_MPT_HW:
4555                                        in_param = slave;
4556                                        err = mlx4_cmd(dev, in_param, mptn, 0,
4557                                                       MLX4_CMD_HW2SW_MPT,
4558                                                       MLX4_CMD_TIME_CLASS_A,
4559                                                       MLX4_CMD_NATIVE);
4560                                        if (err)
4561                                                mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4562                                                         slave, mptn);
4563                                        if (mpt->mtt)
4564                                                atomic_dec(&mpt->mtt->ref_count);
4565                                        state = RES_MPT_MAPPED;
4566                                        break;
4567                                default:
4568                                        state = 0;
4569                                }
4570                        }
4571                }
4572                spin_lock_irq(mlx4_tlock(dev));
4573        }
4574        spin_unlock_irq(mlx4_tlock(dev));
4575}
4576
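    /*
     * Reclaim all MTT ranges still owned by @slave. MTTs have a single
     * tracked state (RES_MTT_ALLOCATED); each entry covers 1 << order
     * translation entries, which is also the amount returned to the
     * slave's quota.
     */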
4577static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4578{
4579        struct mlx4_priv *priv = mlx4_priv(dev);
4580        struct mlx4_resource_tracker *tracker =
4581                &priv->mfunc.master.res_tracker;
4582        struct list_head *mtt_list =
4583                &tracker->slave_list[slave].res_list[RES_MTT];
4584        struct res_mtt *mtt;
4585        struct res_mtt *tmp;
4586        int state;
4588        int base;
4589        int err;
4590
4591        err = move_all_busy(dev, slave, RES_MTT);
4592        if (err)
4593                mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4594                          slave);
4595
4596        spin_lock_irq(mlx4_tlock(dev));
4597        list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4598                spin_unlock_irq(mlx4_tlock(dev));
4599                if (mtt->com.owner == slave) {
4600                        base = mtt->com.res_id;
4601                        state = mtt->com.from_state;
4602                        while (state != 0) {
4603                                switch (state) {
4604                                case RES_MTT_ALLOCATED:
4605                                        __mlx4_free_mtt_range(dev, base,
4606                                                              mtt->order);
4607                                        spin_lock_irq(mlx4_tlock(dev));
4608                                        rb_erase(&mtt->com.node,
4609                                                 &tracker->res_tree[RES_MTT]);
4610                                        list_del(&mtt->com.list);
4611                                        spin_unlock_irq(mlx4_tlock(dev));
4612                                        mlx4_release_resource(dev, slave, RES_MTT,
4613                                                              1 << mtt->order, 0);
4614                                        kfree(mtt);
4615                                        state = 0;
4616                                        break;
4617
4618                                default:
4619                                        state = 0;
4620                                }
4621                        }
4622                }
4623                spin_lock_irq(mlx4_tlock(dev));
4624        }
4625        spin_unlock_irq(mlx4_tlock(dev));
4626}
4627
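    /*
     * Detach and free all flow steering rules still owned by @slave; the
     * rule's res_id doubles as the firmware handle passed to
     * MLX4_QP_FLOW_STEERING_DETACH.
     */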
4628static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4629{
4630        struct mlx4_priv *priv = mlx4_priv(dev);
4631        struct mlx4_resource_tracker *tracker =
4632                &priv->mfunc.master.res_tracker;
4633        struct list_head *fs_rule_list =
4634                &tracker->slave_list[slave].res_list[RES_FS_RULE];
4635        struct res_fs_rule *fs_rule;
4636        struct res_fs_rule *tmp;
4637        int state;
4638        u64 base;
4639        int err;
4640
4641        err = move_all_busy(dev, slave, RES_FS_RULE);
4642        if (err)
4643                mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules - too busy for slave %d\n",
4644                          slave);
4645
4646        spin_lock_irq(mlx4_tlock(dev));
4647        list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4648                spin_unlock_irq(mlx4_tlock(dev));
4649                if (fs_rule->com.owner == slave) {
4650                        base = fs_rule->com.res_id;
4651                        state = fs_rule->com.from_state;
4652                        while (state != 0) {
4653                                switch (state) {
4654                                case RES_FS_RULE_ALLOCATED:
4655                                        /* detach rule */
4656                                        err = mlx4_cmd(dev, base, 0, 0,
4657                                                       MLX4_QP_FLOW_STEERING_DETACH,
4658                                                       MLX4_CMD_TIME_CLASS_A,
4659                                                       MLX4_CMD_NATIVE);
                                            if (err)
                                                    mlx4_dbg(dev, "rem_slave_fs_rule: failed to detach fs rule for slave %d\n",
                                                             slave);
4660
4661                                        spin_lock_irq(mlx4_tlock(dev));
4662                                        rb_erase(&fs_rule->com.node,
4663                                                 &tracker->res_tree[RES_FS_RULE]);
4664                                        list_del(&fs_rule->com.list);
4665                                        spin_unlock_irq(mlx4_tlock(dev));
4666                                        kfree(fs_rule);
4667                                        state = 0;
4668                                        break;
4669
4670                                default:
4671                                        state = 0;
4672                                }
4673                        }
4674                }
4675                spin_lock_irq(mlx4_tlock(dev));
4676        }
4677        spin_unlock_irq(mlx4_tlock(dev));
4678}
4679
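    /*
     * Reclaim all EQs still owned by @slave, moving hardware-owned EQs
     * back to software ownership with HW2SW_EQ before dropping the tracker
     * entry. Only the low 10 bits of the res_id encode the slave-relative
     * EQ number handed to firmware.
     */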
4680static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4681{
4682        struct mlx4_priv *priv = mlx4_priv(dev);
4683        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4684        struct list_head *eq_list =
4685                &tracker->slave_list[slave].res_list[RES_EQ];
4686        struct res_eq *eq;
4687        struct res_eq *tmp;
4688        int err;
4689        int state;
4691        int eqn;
4692
4693        err = move_all_busy(dev, slave, RES_EQ);
4694        if (err)
4695                mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4696                          slave);
4697
4698        spin_lock_irq(mlx4_tlock(dev));
4699        list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4700                spin_unlock_irq(mlx4_tlock(dev));
4701                if (eq->com.owner == slave) {
4702                        eqn = eq->com.res_id;
4703                        state = eq->com.from_state;
4704                        while (state != 0) {
4705                                switch (state) {
4706                                case RES_EQ_RESERVED:
4707                                        spin_lock_irq(mlx4_tlock(dev));
4708                                        rb_erase(&eq->com.node,
4709                                                 &tracker->res_tree[RES_EQ]);
4710                                        list_del(&eq->com.list);
4711                                        spin_unlock_irq(mlx4_tlock(dev));
4712                                        kfree(eq);
4713                                        state = 0;
4714                                        break;
4715
4716                                case RES_EQ_HW:
4717                                        err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4718                                                       1, MLX4_CMD_HW2SW_EQ,
4719                                                       MLX4_CMD_TIME_CLASS_A,
4720                                                       MLX4_CMD_NATIVE);
4721                                        if (err)
4722                                                mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4723                                                         slave, eqn & 0x3ff);
4724                                        atomic_dec(&eq->mtt->ref_count);
4725                                        state = RES_EQ_RESERVED;
4726                                        break;
4727
4728                                default:
4729                                        state = 0;
4730                                }
4731                        }
4732                }
4733                spin_lock_irq(mlx4_tlock(dev));
4734        }
4735        spin_unlock_irq(mlx4_tlock(dev));
4736}
4737
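    /*
     * Reclaim all counters still owned by @slave. No firmware command is
     * needed, so the whole walk runs under the tracker lock.
     */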
4738static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4739{
4740        struct mlx4_priv *priv = mlx4_priv(dev);
4741        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4742        struct list_head *counter_list =
4743                &tracker->slave_list[slave].res_list[RES_COUNTER];
4744        struct res_counter *counter;
4745        struct res_counter *tmp;
4746        int err;
4747        int index;
4748
4749        err = move_all_busy(dev, slave, RES_COUNTER);
4750        if (err)
4751                mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4752                          slave);
4753
4754        spin_lock_irq(mlx4_tlock(dev));
4755        list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4756                if (counter->com.owner == slave) {
4757                        index = counter->com.res_id;
4758                        rb_erase(&counter->com.node,
4759                                 &tracker->res_tree[RES_COUNTER]);
4760                        list_del(&counter->com.list);
4761                        kfree(counter);
4762                        __mlx4_counter_free(dev, index);
4763                        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4764                }
4765        }
4766        spin_unlock_irq(mlx4_tlock(dev));
4767}
4768
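    /*
     * Reclaim all XRC domains still owned by @slave; like counters, these
     * are freed directly under the tracker lock.
     */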
4769static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4770{
4771        struct mlx4_priv *priv = mlx4_priv(dev);
4772        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4773        struct list_head *xrcdn_list =
4774                &tracker->slave_list[slave].res_list[RES_XRCD];
4775        struct res_xrcdn *xrcd;
4776        struct res_xrcdn *tmp;
4777        int err;
4778        int xrcdn;
4779
4780        err = move_all_busy(dev, slave, RES_XRCD);
4781        if (err)
4782                mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4783                          slave);
4784
4785        spin_lock_irq(mlx4_tlock(dev));
4786        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4787                if (xrcd->com.owner == slave) {
4788                        xrcdn = xrcd->com.res_id;
4789                        rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4790                        list_del(&xrcd->com.list);
4791                        kfree(xrcd);
4792                        __mlx4_xrcd_free(dev, xrcdn);
4793                }
4794        }
4795        spin_unlock_irq(mlx4_tlock(dev));
4796}
4797
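    /*
     * Release every resource the given slave still holds, e.g. when the VF
     * goes away without cleaning up. Teardown order follows the dependency
     * chain: QPs are removed before the SRQs and CQs they reference, and
     * all MTT users (QPs, SRQs, CQs, MRs, EQs) before the MTT ranges
     * themselves. The per-slave mutex serializes this against other
     * resource operations for the same slave.
     */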
4798void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4799{
4800        struct mlx4_priv *priv = mlx4_priv(dev);

4801        mlx4_reset_roce_gids(dev, slave);
4802        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4803        rem_slave_vlans(dev, slave);
4804        rem_slave_macs(dev, slave);
4805        rem_slave_fs_rule(dev, slave);
4806        rem_slave_qps(dev, slave);
4807        rem_slave_srqs(dev, slave);
4808        rem_slave_cqs(dev, slave);
4809        rem_slave_mrs(dev, slave);
4810        rem_slave_eqs(dev, slave);
4811        rem_slave_mtts(dev, slave);
4812        rem_slave_counters(dev, slave);
4813        rem_slave_xrcdns(dev, slave);
4814        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4815}
4816
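    /*
     * Deferred work, run on the PF, that pushes an immediate VLAN/QoS
     * change (e.g. from ndo_set_vf_vlan) to every eligible QP of a VF via
     * the UPDATE_QP command.
     */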
4817void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4818{
4819        struct mlx4_vf_immed_vlan_work *work =
4820                container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4821        struct mlx4_cmd_mailbox *mailbox;
4822        struct mlx4_update_qp_context *upd_context;
4823        struct mlx4_dev *dev = &work->priv->dev;
4824        struct mlx4_resource_tracker *tracker =
4825                &work->priv->mfunc.master.res_tracker;
4826        struct list_head *qp_list =
4827                &tracker->slave_list[work->slave].res_list[RES_QP];
4828        struct res_qp *qp;
4829        struct res_qp *tmp;
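            /*
             * qp_path_mask selects the path fields rewritten for every QP;
             * the VLAN blocking controls in qp_path_mask_vlan_ctrl are
             * additionally applied to non-RC QPs further down.
             */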
4830        u64 qp_path_mask_vlan_ctrl =
4831                       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4832                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4833                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4834                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4835                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4836                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4837
4838        u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4839                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4840                       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4841                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4842                       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4843                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4844                       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4845
4846        int err;
4847        int port, errors = 0;
4848        u8 vlan_control;
4849
4850        if (mlx4_is_slave(dev)) {
4851                mlx4_warn(dev, "Trying to run UPDATE_QP on slave %d\n",
4852                          work->slave);
4853                goto out;
4854        }
4855
4856        mailbox = mlx4_alloc_cmd_mailbox(dev);
4857        if (IS_ERR(mailbox))
4858                goto out;
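            /*
             * Pick the VLAN filtering policy: block all traffic when the
             * link is administratively disabled; with no VST vlan set,
             * block only tagged frames; otherwise block untagged and
             * priority-tagged receive plus tagged transmit to enforce the
             * VST vlan.
             */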
4859        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4860                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4861                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4862                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4863                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4864                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4865                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4866        else if (!work->vlan_id)
4867                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4868                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4869        else
4870                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4871                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4872                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4873
4874        upd_context = mailbox->buf;
4875        upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
4876
4877        spin_lock_irq(mlx4_tlock(dev));
4878        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4879                spin_unlock_irq(mlx4_tlock(dev));
4880                if (qp->com.owner == work->slave) {
4881                        if (qp->com.from_state != RES_QP_HW ||
4882                            !qp->sched_queue || /* no INIT2RTR transition yet */
4883                            mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4884                            qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4885                                spin_lock_irq(mlx4_tlock(dev));
4886                                continue;
4887                        }
4888                        port = (qp->sched_queue >> 6 & 1) + 1;
4889                        if (port != work->port) {
4890                                spin_lock_irq(mlx4_tlock(dev));
4891                                continue;
4892                        }
4893                        if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4894                                upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4895                        else
4896                                upd_context->primary_addr_path_mask =
4897                                        cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4898                        if (work->vlan_id == MLX4_VGT) {
4899                                upd_context->qp_context.param3 = qp->param3;
4900                                upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4901                                upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4902                                upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4903                                upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4904                                upd_context->qp_context.pri_path.feup = qp->feup;
4905                                upd_context->qp_context.pri_path.sched_queue =
4906                                        qp->sched_queue;
4907                        } else {
4908                                upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4909                                upd_context->qp_context.pri_path.vlan_control = vlan_control;
4910                                upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4911                                upd_context->qp_context.pri_path.fvl_rx =
4912                                        qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4913                                upd_context->qp_context.pri_path.fl =
4914                                        qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4915                                upd_context->qp_context.pri_path.feup =
4916                                        qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4917                                upd_context->qp_context.pri_path.sched_queue =
4918                                        qp->sched_queue & 0xC7;
4919                                upd_context->qp_context.pri_path.sched_queue |=
4920                                        ((work->qos & 0x7) << 3);
4921                                upd_context->qp_mask |=
4922                                        cpu_to_be64(1ULL <<
4923                                                    MLX4_UPD_QP_MASK_QOS_VPP);
4924                                upd_context->qp_context.qos_vport =
4925                                        work->qos_vport;
4926                        }
4927
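                            /*
                             * Push the rewritten context to the QP;
                             * failures are counted and summarized after
                             * the whole list has been walked.
                             */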
4928                        err = mlx4_cmd(dev, mailbox->dma,
4929                                       qp->local_qpn & 0xffffff,
4930                                       0, MLX4_CMD_UPDATE_QP,
4931                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4932                        if (err) {
4933                                mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4934                                          work->slave, port, qp->local_qpn, err);
4935                                errors++;
4936                        }
4937                }
4938                spin_lock_irq(mlx4_tlock(dev));
4939        }
4940        spin_unlock_irq(mlx4_tlock(dev));
4941        mlx4_free_cmd_mailbox(dev, mailbox);
4942
4943        if (errors)
4944                mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4945                         errors, work->slave, work->port);
4946
4947        /* Unregister the previous vlan_id, if one was set, but only if
4948         * all of the QP updates succeeded.
4949         */
4950        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4951            NO_INDX != work->orig_vlan_ix)
4952                __mlx4_unregister_vlan(&work->priv->dev, work->port,
4953                                       work->orig_vlan_id);
4954out:
4955        kfree(work);
4957}
4958