linux/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
   1/*
   2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
   4 * All rights reserved.
   5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * OpenIB.org BSD license below:
  12 *
  13 *     Redistribution and use in source and binary forms, with or
  14 *     without modification, are permitted provided that the following
  15 *     conditions are met:
  16 *
  17 *      - Redistributions of source code must retain the above
  18 *        copyright notice, this list of conditions and the following
  19 *        disclaimer.
  20 *
  21 *      - Redistributions in binary form must reproduce the above
  22 *        copyright notice, this list of conditions and the following
  23 *        disclaimer in the documentation and/or other materials
  24 *        provided with the distribution.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33 * SOFTWARE.
  34 */
  35
  36#include <linux/sched.h>
  37#include <linux/pci.h>
  38#include <linux/errno.h>
  39#include <linux/kernel.h>
  40#include <linux/io.h>
  41#include <linux/slab.h>
  42#include <linux/mlx4/cmd.h>
  43#include <linux/mlx4/qp.h>
  44#include <linux/if_ether.h>
  45#include <linux/etherdevice.h>
  46
  47#include "mlx4.h"
  48#include "fw.h"
  49#include "mlx4_stats.h"
  50
  51#define MLX4_MAC_VALID          (1ull << 63)
  52#define MLX4_PF_COUNTERS_PER_PORT       2
  53#define MLX4_VF_COUNTERS_PER_PORT       1
  54
  55struct mac_res {
  56        struct list_head list;
  57        u64 mac;
  58        int ref_count;
  59        u8 smac_index;
  60        u8 port;
  61};
  62
  63struct vlan_res {
  64        struct list_head list;
  65        u16 vlan;
  66        int ref_count;
  67        int vlan_index;
  68        u8 port;
  69};
  70
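/*
 * Common header shared by all tracked resources: linked into the owning
 * slave's per-type list and keyed by res_id in the per-type rb-tree.
 * 'owner' is the slave (function) number, 'state'/'from_state' implement
 * the busy-marking protocol used by get_res()/put_res(), and 'func_name'
 * records which caller currently holds the resource busy, for diagnostics.
 */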
  71struct res_common {
  72        struct list_head        list;
  73        struct rb_node          node;
  74        u64                     res_id;
  75        int                     owner;
  76        int                     state;
  77        int                     from_state;
  78        int                     to_state;
  79        int                     removing;
  80        const char              *func_name;
  81};
  82
  83enum {
  84        RES_ANY_BUSY = 1
  85};
  86
  87struct res_gid {
  88        struct list_head        list;
  89        u8                      gid[16];
  90        enum mlx4_protocol      prot;
  91        enum mlx4_steer_type    steer;
  92        u64                     reg_id;
  93};
  94
  95enum res_qp_states {
  96        RES_QP_BUSY = RES_ANY_BUSY,
  97
  98        /* QP number was allocated */
  99        RES_QP_RESERVED,
 100
 101        /* ICM memory for QP context was mapped */
 102        RES_QP_MAPPED,
 103
 104        /* QP is in hw ownership */
 105        RES_QP_HW
 106};
 107
 108struct res_qp {
 109        struct res_common       com;
 110        struct res_mtt         *mtt;
 111        struct res_cq          *rcq;
 112        struct res_cq          *scq;
 113        struct res_srq         *srq;
 114        struct list_head        mcg_list;
 115        spinlock_t              mcg_spl;
 116        int                     local_qpn;
 117        atomic_t                ref_count;
 118        u32                     qpc_flags;
  119        /* QP params saved before VST enforcement, restored when moving back to VGT */
 120        u8                      sched_queue;
 121        __be32                  param3;
 122        u8                      vlan_control;
 123        u8                      fvl_rx;
 124        u8                      pri_path_fl;
 125        u8                      vlan_index;
 126        u8                      feup;
 127};
 128
 129enum res_mtt_states {
 130        RES_MTT_BUSY = RES_ANY_BUSY,
 131        RES_MTT_ALLOCATED,
 132};
 133
 134static inline const char *mtt_states_str(enum res_mtt_states state)
 135{
 136        switch (state) {
 137        case RES_MTT_BUSY: return "RES_MTT_BUSY";
 138        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
 139        default: return "Unknown";
 140        }
 141}
 142
 143struct res_mtt {
 144        struct res_common       com;
 145        int                     order;
 146        atomic_t                ref_count;
 147};
 148
 149enum res_mpt_states {
 150        RES_MPT_BUSY = RES_ANY_BUSY,
 151        RES_MPT_RESERVED,
 152        RES_MPT_MAPPED,
 153        RES_MPT_HW,
 154};
 155
 156struct res_mpt {
 157        struct res_common       com;
 158        struct res_mtt         *mtt;
 159        int                     key;
 160};
 161
 162enum res_eq_states {
 163        RES_EQ_BUSY = RES_ANY_BUSY,
 164        RES_EQ_RESERVED,
 165        RES_EQ_HW,
 166};
 167
 168struct res_eq {
 169        struct res_common       com;
 170        struct res_mtt         *mtt;
 171};
 172
 173enum res_cq_states {
 174        RES_CQ_BUSY = RES_ANY_BUSY,
 175        RES_CQ_ALLOCATED,
 176        RES_CQ_HW,
 177};
 178
 179struct res_cq {
 180        struct res_common       com;
 181        struct res_mtt         *mtt;
 182        atomic_t                ref_count;
 183};
 184
 185enum res_srq_states {
 186        RES_SRQ_BUSY = RES_ANY_BUSY,
 187        RES_SRQ_ALLOCATED,
 188        RES_SRQ_HW,
 189};
 190
 191struct res_srq {
 192        struct res_common       com;
 193        struct res_mtt         *mtt;
 194        struct res_cq          *cq;
 195        atomic_t                ref_count;
 196};
 197
 198enum res_counter_states {
 199        RES_COUNTER_BUSY = RES_ANY_BUSY,
 200        RES_COUNTER_ALLOCATED,
 201};
 202
 203struct res_counter {
 204        struct res_common       com;
 205        int                     port;
 206};
 207
 208enum res_xrcdn_states {
 209        RES_XRCD_BUSY = RES_ANY_BUSY,
 210        RES_XRCD_ALLOCATED,
 211};
 212
 213struct res_xrcdn {
 214        struct res_common       com;
 215        int                     port;
 216};
 217
 218enum res_fs_rule_states {
 219        RES_FS_RULE_BUSY = RES_ANY_BUSY,
 220        RES_FS_RULE_ALLOCATED,
 221};
 222
 223struct res_fs_rule {
 224        struct res_common       com;
 225        int                     qpn;
 226        /* VF DMFS mbox with port flipped */
 227        void                    *mirr_mbox;
  228        /* > 0 --> apply the mirror rule when entering HA mode  */
  229        /* = 0 --> remove the mirror rule when leaving HA mode  */
 230        u32                     mirr_mbox_size;
 231        struct list_head        mirr_list;
 232        u64                     mirr_rule_id;
 233};
 234
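/*
 * Find a tracked resource by its ID in one of the per-type rb-trees.
 * Must be called with the tracker lock held.
 */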
 235static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
 236{
 237        struct rb_node *node = root->rb_node;
 238
 239        while (node) {
 240                struct res_common *res = rb_entry(node, struct res_common,
 241                                                  node);
 242
 243                if (res_id < res->res_id)
 244                        node = node->rb_left;
 245                else if (res_id > res->res_id)
 246                        node = node->rb_right;
 247                else
 248                        return res;
 249        }
 250        return NULL;
 251}
 252
 253static int res_tracker_insert(struct rb_root *root, struct res_common *res)
 254{
 255        struct rb_node **new = &(root->rb_node), *parent = NULL;
 256
 257        /* Figure out where to put new node */
 258        while (*new) {
 259                struct res_common *this = rb_entry(*new, struct res_common,
 260                                                   node);
 261
 262                parent = *new;
 263                if (res->res_id < this->res_id)
 264                        new = &((*new)->rb_left);
 265                else if (res->res_id > this->res_id)
 266                        new = &((*new)->rb_right);
 267                else
 268                        return -EEXIST;
 269        }
 270
 271        /* Add new node and rebalance tree. */
 272        rb_link_node(&res->node, parent, new);
 273        rb_insert_color(&res->node, root);
 274
 275        return 0;
 276}
 277
 278enum qp_transition {
 279        QP_TRANS_INIT2RTR,
 280        QP_TRANS_RTR2RTS,
 281        QP_TRANS_RTS2RTS,
 282        QP_TRANS_SQERR2RTS,
 283        QP_TRANS_SQD2SQD,
 284        QP_TRANS_SQD2RTS
 285};
 286
  287/* For debug purposes only */
 288static const char *resource_str(enum mlx4_resource rt)
 289{
 290        switch (rt) {
 291        case RES_QP: return "RES_QP";
 292        case RES_CQ: return "RES_CQ";
 293        case RES_SRQ: return "RES_SRQ";
 294        case RES_MPT: return "RES_MPT";
 295        case RES_MTT: return "RES_MTT";
 296        case RES_MAC: return  "RES_MAC";
 297        case RES_VLAN: return  "RES_VLAN";
 298        case RES_EQ: return "RES_EQ";
 299        case RES_COUNTER: return "RES_COUNTER";
 300        case RES_FS_RULE: return "RES_FS_RULE";
 301        case RES_XRCD: return "RES_XRCD";
 302        default: return "Unknown resource type !!!";
  303        }
 304}
 305
 306static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
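/*
 * Account an allocation of 'count' instances of 'res_type' (optionally per
 * port) to 'slave'.  The request fails with -EDQUOT if it would exceed the
 * slave's quota, or if it cannot be satisfied from the slave's guaranteed
 * share plus the shared free pool without eating into the instances
 * reserved to back other functions' guarantees.
 */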
 307static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
 308                                      enum mlx4_resource res_type, int count,
 309                                      int port)
 310{
 311        struct mlx4_priv *priv = mlx4_priv(dev);
 312        struct resource_allocator *res_alloc =
 313                &priv->mfunc.master.res_tracker.res_alloc[res_type];
 314        int err = -EDQUOT;
 315        int allocated, free, reserved, guaranteed, from_free;
 316        int from_rsvd;
 317
 318        if (slave > dev->persist->num_vfs)
 319                return -EINVAL;
 320
 321        spin_lock(&res_alloc->alloc_lock);
 322        allocated = (port > 0) ?
 323                res_alloc->allocated[(port - 1) *
 324                (dev->persist->num_vfs + 1) + slave] :
 325                res_alloc->allocated[slave];
 326        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
 327                res_alloc->res_free;
 328        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
 329                res_alloc->res_reserved;
 330        guaranteed = res_alloc->guaranteed[slave];
 331
 332        if (allocated + count > res_alloc->quota[slave]) {
 333                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
 334                          slave, port, resource_str(res_type), count,
 335                          allocated, res_alloc->quota[slave]);
 336                goto out;
 337        }
 338
 339        if (allocated + count <= guaranteed) {
 340                err = 0;
 341                from_rsvd = count;
 342        } else {
 343                /* portion may need to be obtained from free area */
 344                if (guaranteed - allocated > 0)
 345                        from_free = count - (guaranteed - allocated);
 346                else
 347                        from_free = count;
 348
 349                from_rsvd = count - from_free;
 350
 351                if (free - from_free >= reserved)
 352                        err = 0;
 353                else
 354                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
 355                                  slave, port, resource_str(res_type), free,
 356                                  from_free, reserved);
 357        }
 358
 359        if (!err) {
 360                /* grant the request */
 361                if (port > 0) {
 362                        res_alloc->allocated[(port - 1) *
 363                        (dev->persist->num_vfs + 1) + slave] += count;
 364                        res_alloc->res_port_free[port - 1] -= count;
 365                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
 366                } else {
 367                        res_alloc->allocated[slave] += count;
 368                        res_alloc->res_free -= count;
 369                        res_alloc->res_reserved -= from_rsvd;
 370                }
 371        }
 372
 373out:
 374        spin_unlock(&res_alloc->alloc_lock);
 375        return err;
 376}
 377
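/*
 * Undo mlx4_grant_resource(): return 'count' instances to the free pool,
 * handing back to the reserved pool whatever portion falls below the
 * slave's guaranteed share.
 */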
 378static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
 379                                    enum mlx4_resource res_type, int count,
 380                                    int port)
 381{
 382        struct mlx4_priv *priv = mlx4_priv(dev);
 383        struct resource_allocator *res_alloc =
 384                &priv->mfunc.master.res_tracker.res_alloc[res_type];
 385        int allocated, guaranteed, from_rsvd;
 386
 387        if (slave > dev->persist->num_vfs)
 388                return;
 389
 390        spin_lock(&res_alloc->alloc_lock);
 391
 392        allocated = (port > 0) ?
 393                res_alloc->allocated[(port - 1) *
 394                (dev->persist->num_vfs + 1) + slave] :
 395                res_alloc->allocated[slave];
 396        guaranteed = res_alloc->guaranteed[slave];
 397
 398        if (allocated - count >= guaranteed) {
 399                from_rsvd = 0;
 400        } else {
 401                /* portion may need to be returned to reserved area */
 402                if (allocated - guaranteed > 0)
 403                        from_rsvd = count - (allocated - guaranteed);
 404                else
 405                        from_rsvd = count;
 406        }
 407
 408        if (port > 0) {
 409                res_alloc->allocated[(port - 1) *
 410                (dev->persist->num_vfs + 1) + slave] -= count;
 411                res_alloc->res_port_free[port - 1] += count;
 412                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
 413        } else {
 414                res_alloc->allocated[slave] -= count;
 415                res_alloc->res_free += count;
 416                res_alloc->res_reserved += from_rsvd;
 417        }
 418
 419        spin_unlock(&res_alloc->alloc_lock);
 420        return;
 421}
 422
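/*
 * Set the quota and guarantee of one function for one resource type: each
 * function is guaranteed num_instances / (2 * (num_vfs + 1)) instances and
 * may allocate up to half of the total on top of that guarantee.  For the
 * PF the type's free pool is initialized here as well, and the reserved
 * MTTs are added to the PF's figures since they are carved out of the PF
 * allocation.
 */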
 423static inline void initialize_res_quotas(struct mlx4_dev *dev,
 424                                         struct resource_allocator *res_alloc,
 425                                         enum mlx4_resource res_type,
 426                                         int vf, int num_instances)
 427{
 428        res_alloc->guaranteed[vf] = num_instances /
 429                                    (2 * (dev->persist->num_vfs + 1));
 430        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
 431        if (vf == mlx4_master_func_num(dev)) {
 432                res_alloc->res_free = num_instances;
 433                if (res_type == RES_MTT) {
 434                        /* reserved mtts will be taken out of the PF allocation */
 435                        res_alloc->res_free += dev->caps.reserved_mtts;
 436                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
 437                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
 438                }
 439        }
 440}
 441
 442void mlx4_init_quotas(struct mlx4_dev *dev)
 443{
 444        struct mlx4_priv *priv = mlx4_priv(dev);
 445        int pf;
 446
 447        /* quotas for VFs are initialized in mlx4_slave_cap */
 448        if (mlx4_is_slave(dev))
 449                return;
 450
 451        if (!mlx4_is_mfunc(dev)) {
 452                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
 453                        mlx4_num_reserved_sqps(dev);
 454                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
 455                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
 456                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
 457                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
 458                return;
 459        }
 460
 461        pf = mlx4_master_func_num(dev);
 462        dev->quotas.qp =
 463                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
 464        dev->quotas.cq =
 465                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
 466        dev->quotas.srq =
 467                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
 468        dev->quotas.mtt =
 469                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
 470        dev->quotas.mpt =
 471                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
 472}
 473
  474static int get_max_guaranteed_vfs_counter(struct mlx4_dev *dev)
  475{
  476        /* subtract 1 for the sink counter */
 477        return (dev->caps.max_counters - 1 -
 478                (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
 479                / MLX4_MAX_PORTS;
 480}
 481
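/*
 * Allocate and initialize the master's resource tracker: the per-slave
 * resource lists, the per-type rb-trees, and the per-type
 * quota/guarantee/free-pool accounting for the PF and every VF.
 */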
 482int mlx4_init_resource_tracker(struct mlx4_dev *dev)
 483{
 484        struct mlx4_priv *priv = mlx4_priv(dev);
 485        int i, j;
 486        int t;
  487        int max_vfs_guarantee_counter = get_max_guaranteed_vfs_counter(dev);
 488
  489        priv->mfunc.master.res_tracker.slave_list =
  490                kcalloc(dev->num_slaves, sizeof(struct slave_list),
  491                        GFP_KERNEL);
 492        if (!priv->mfunc.master.res_tracker.slave_list)
 493                return -ENOMEM;
 494
 495        for (i = 0 ; i < dev->num_slaves; i++) {
 496                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
 497                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
 498                                       slave_list[i].res_list[t]);
 499                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
 500        }
 501
 502        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
 503                 dev->num_slaves);
 504        for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
 505                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
 506
 507        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
 508                struct resource_allocator *res_alloc =
 509                        &priv->mfunc.master.res_tracker.res_alloc[i];
  510                res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
  511                                                 sizeof(int), GFP_KERNEL);
  512                res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
  513                                                      sizeof(int), GFP_KERNEL);
  514                if (i == RES_MAC || i == RES_VLAN)
  515                        res_alloc->allocated =
  516                                kcalloc(MLX4_MAX_PORTS *
  517                                        (dev->persist->num_vfs + 1),
  518                                        sizeof(int), GFP_KERNEL);
  519                else
  520                        res_alloc->allocated =
  521                                kcalloc(dev->persist->num_vfs + 1,
  522                                        sizeof(int), GFP_KERNEL);
  523                /* Exclude the sink counter from the free pool */
 524                if (i == RES_COUNTER)
 525                        res_alloc->res_free = dev->caps.max_counters - 1;
 526
 527                if (!res_alloc->quota || !res_alloc->guaranteed ||
 528                    !res_alloc->allocated)
 529                        goto no_mem_err;
 530
 531                spin_lock_init(&res_alloc->alloc_lock);
 532                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
 533                        struct mlx4_active_ports actv_ports =
 534                                mlx4_get_active_ports(dev, t);
 535                        switch (i) {
 536                        case RES_QP:
 537                                initialize_res_quotas(dev, res_alloc, RES_QP,
 538                                                      t, dev->caps.num_qps -
 539                                                      dev->caps.reserved_qps -
 540                                                      mlx4_num_reserved_sqps(dev));
 541                                break;
 542                        case RES_CQ:
 543                                initialize_res_quotas(dev, res_alloc, RES_CQ,
 544                                                      t, dev->caps.num_cqs -
 545                                                      dev->caps.reserved_cqs);
 546                                break;
 547                        case RES_SRQ:
 548                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
 549                                                      t, dev->caps.num_srqs -
 550                                                      dev->caps.reserved_srqs);
 551                                break;
 552                        case RES_MPT:
 553                                initialize_res_quotas(dev, res_alloc, RES_MPT,
 554                                                      t, dev->caps.num_mpts -
 555                                                      dev->caps.reserved_mrws);
 556                                break;
 557                        case RES_MTT:
 558                                initialize_res_quotas(dev, res_alloc, RES_MTT,
 559                                                      t, dev->caps.num_mtts -
 560                                                      dev->caps.reserved_mtts);
 561                                break;
 562                        case RES_MAC:
 563                                if (t == mlx4_master_func_num(dev)) {
 564                                        int max_vfs_pport = 0;
  565                                        /* Calculate the max VFs per port across both ports */
 567                                        for (j = 0; j < dev->caps.num_ports;
 568                                             j++) {
 569                                                struct mlx4_slaves_pport slaves_pport =
 570                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
 571                                                unsigned current_slaves =
 572                                                        bitmap_weight(slaves_pport.slaves,
 573                                                                      dev->caps.num_ports) - 1;
 574                                                if (max_vfs_pport < current_slaves)
 575                                                        max_vfs_pport =
 576                                                                current_slaves;
 577                                        }
 578                                        res_alloc->quota[t] =
 579                                                MLX4_MAX_MAC_NUM -
 580                                                2 * max_vfs_pport;
 581                                        res_alloc->guaranteed[t] = 2;
 582                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
 583                                                res_alloc->res_port_free[j] =
 584                                                        MLX4_MAX_MAC_NUM;
 585                                } else {
 586                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
 587                                        res_alloc->guaranteed[t] = 2;
 588                                }
 589                                break;
 590                        case RES_VLAN:
 591                                if (t == mlx4_master_func_num(dev)) {
 592                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
 593                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
 594                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
 595                                                res_alloc->res_port_free[j] =
 596                                                        res_alloc->quota[t];
 597                                } else {
 598                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
 599                                        res_alloc->guaranteed[t] = 0;
 600                                }
 601                                break;
 602                        case RES_COUNTER:
 603                                res_alloc->quota[t] = dev->caps.max_counters;
 604                                if (t == mlx4_master_func_num(dev))
 605                                        res_alloc->guaranteed[t] =
 606                                                MLX4_PF_COUNTERS_PER_PORT *
 607                                                MLX4_MAX_PORTS;
 608                                else if (t <= max_vfs_guarantee_counter)
 609                                        res_alloc->guaranteed[t] =
 610                                                MLX4_VF_COUNTERS_PER_PORT *
 611                                                MLX4_MAX_PORTS;
 612                                else
 613                                        res_alloc->guaranteed[t] = 0;
 614                                res_alloc->res_free -= res_alloc->guaranteed[t];
 615                                break;
 616                        default:
 617                                break;
 618                        }
 619                        if (i == RES_MAC || i == RES_VLAN) {
 620                                for (j = 0; j < dev->caps.num_ports; j++)
 621                                        if (test_bit(j, actv_ports.ports))
 622                                                res_alloc->res_port_rsvd[j] +=
 623                                                        res_alloc->guaranteed[t];
 624                        } else {
 625                                res_alloc->res_reserved += res_alloc->guaranteed[t];
 626                        }
 627                }
 628        }
 629        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
 630        return 0;
 631
 632no_mem_err:
 633        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
 634                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
 635                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
 636                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
 637                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
 638                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
 639                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
 640        }
 641        return -ENOMEM;
 642}
 643
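/*
 * Tear down the resource tracker.  Depending on 'type' this releases the
 * resources still held by the slaves (plus the master's VLANs and RoCE
 * GIDs), frees the tracker's own bookkeeping structures, or both.
 */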
 644void mlx4_free_resource_tracker(struct mlx4_dev *dev,
 645                                enum mlx4_res_tracker_free_type type)
 646{
 647        struct mlx4_priv *priv = mlx4_priv(dev);
 648        int i;
 649
 650        if (priv->mfunc.master.res_tracker.slave_list) {
 651                if (type != RES_TR_FREE_STRUCTS_ONLY) {
 652                        for (i = 0; i < dev->num_slaves; i++) {
 653                                if (type == RES_TR_FREE_ALL ||
 654                                    dev->caps.function != i)
 655                                        mlx4_delete_all_resources_for_slave(dev, i);
 656                        }
 657                        /* free master's vlans */
 658                        i = dev->caps.function;
 659                        mlx4_reset_roce_gids(dev, i);
 660                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
 661                        rem_slave_vlans(dev, i);
 662                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
 663                }
 664
 665                if (type != RES_TR_FREE_SLAVES_ONLY) {
 666                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
 667                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
 668                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
 669                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
 670                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
 671                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
 672                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
 673                        }
 674                        kfree(priv->mfunc.master.res_tracker.slave_list);
 675                        priv->mfunc.master.res_tracker.slave_list = NULL;
 676                }
 677        }
 678}
 679
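/*
 * Replace the P_Key index a slave wrote into its QP context with the
 * physical index from the master's virt2phys_pkey table for that slave and
 * port.  The hard-coded offsets appear to address the sched_queue and
 * P_Key index bytes of the QP context carried in the command mailbox.
 */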
 680static void update_pkey_index(struct mlx4_dev *dev, int slave,
 681                              struct mlx4_cmd_mailbox *inbox)
 682{
 683        u8 sched = *(u8 *)(inbox->buf + 64);
 684        u8 orig_index = *(u8 *)(inbox->buf + 35);
 685        u8 new_index;
 686        struct mlx4_priv *priv = mlx4_priv(dev);
 687        int port;
 688
 689        port = (sched >> 6 & 1) + 1;
 690
 691        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
 692        *(u8 *)(inbox->buf + 35) = new_index;
 693}
 694
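/*
 * Rewrite the mgid_index in a slave's QP context so it falls inside the GID
 * range assigned to that slave: UD QPs get the slave's base GID index
 * (Ethernet) or the slave number (IB), while RC/UC/XRC primary and
 * alternate paths are offset by the slave's base GID index on Ethernet
 * ports.
 */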
 695static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
 696                       u8 slave)
 697{
 698        struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
 699        enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
 700        u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
 701        int port;
 702
 703        if (MLX4_QP_ST_UD == ts) {
 704                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
 705                if (mlx4_is_eth(dev, port))
 706                        qp_ctx->pri_path.mgid_index =
 707                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
 708                else
 709                        qp_ctx->pri_path.mgid_index = slave | 0x80;
 710
 711        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
 712                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
 713                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
 714                        if (mlx4_is_eth(dev, port)) {
 715                                qp_ctx->pri_path.mgid_index +=
 716                                        mlx4_get_base_gid_ix(dev, slave, port);
 717                                qp_ctx->pri_path.mgid_index &= 0x7f;
 718                        } else {
 719                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
 720                        }
 721                }
 722                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
 723                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
 724                        if (mlx4_is_eth(dev, port)) {
 725                                qp_ctx->alt_path.mgid_index +=
 726                                        mlx4_get_base_gid_ix(dev, slave, port);
 727                                qp_ctx->alt_path.mgid_index &= 0x7f;
 728                        } else {
 729                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
 730                        }
 731                }
 732        }
 733}
 734
 735static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
 736                          u8 slave, int port);
 737
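/*
 * Apply the administratively configured vport state (VST VLAN/QinQ, QoS,
 * link state and spoof-check settings) to the QP context submitted by a VF,
 * and bind the QP to a per-port counter via handle_counter().
 */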
 738static int update_vport_qp_param(struct mlx4_dev *dev,
 739                                 struct mlx4_cmd_mailbox *inbox,
 740                                 u8 slave, u32 qpn)
 741{
 742        struct mlx4_qp_context  *qpc = inbox->buf + 8;
 743        struct mlx4_vport_oper_state *vp_oper;
 744        struct mlx4_priv *priv;
 745        u32 qp_type;
 746        int port, err = 0;
 747
 748        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
 749        priv = mlx4_priv(dev);
 750        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 751        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 752
 753        err = handle_counter(dev, qpc, slave, port);
 754        if (err)
 755                goto out;
 756
 757        if (MLX4_VGT != vp_oper->state.default_vlan) {
 758                /* the reserved QPs (special, proxy, tunnel)
 759                 * do not operate over vlans
 760                 */
 761                if (mlx4_is_qp_reserved(dev, qpn))
 762                        return 0;
 763
  764                /* force vlan stripping by clearing vsd; an MLX QP here means Raw Ethernet */
 765                if (qp_type == MLX4_QP_ST_UD ||
 766                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
 767                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
 768                                *(__be32 *)inbox->buf =
 769                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
 770                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
 771                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
 772                        } else {
 773                                struct mlx4_update_qp_params params = {.flags = 0};
 774
 775                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
 776                                if (err)
 777                                        goto out;
 778                        }
 779                }
 780
 781                /* preserve IF_COUNTER flag */
 782                qpc->pri_path.vlan_control &=
 783                        MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
 784                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
 785                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
 786                        qpc->pri_path.vlan_control |=
 787                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
 788                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
 789                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
 790                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
 791                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
 792                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
 793                } else if (0 != vp_oper->state.default_vlan) {
 794                        if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
  795                                /* VST QinQ should block untagged on TX, but
  796                                 * the cvlan is in the payload and phv is set,
  797                                 * so HW sees it as untagged. Block tagged instead.
  798                                 */
 799                                qpc->pri_path.vlan_control |=
 800                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
 801                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
 802                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
 803                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 804                        } else { /* vst 802.1Q */
 805                                qpc->pri_path.vlan_control |=
 806                                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
 807                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
 808                                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 809                        }
 810                } else { /* priority tagged */
 811                        qpc->pri_path.vlan_control |=
 812                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
 813                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
 814                }
 815
 816                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
 817                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
 818                qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
 819                if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
 820                        qpc->pri_path.fl |= MLX4_FL_SV;
 821                else
 822                        qpc->pri_path.fl |= MLX4_FL_CV;
 823                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
 824                qpc->pri_path.sched_queue &= 0xC7;
 825                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
 826                qpc->qos_vport = vp_oper->state.qos_vport;
 827        }
 828        if (vp_oper->state.spoofchk) {
 829                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
 830                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
 831        }
 832out:
 833        return err;
 834}
 835
 836static int mpt_mask(struct mlx4_dev *dev)
 837{
 838        return dev->caps.num_mpts - 1;
 839}
 840
 841static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
 842{
 843        switch (t) {
 844        case RES_QP:
 845                return "QP";
 846        case RES_CQ:
 847                return "CQ";
 848        case RES_SRQ:
 849                return "SRQ";
 850        case RES_XRCD:
 851                return "XRCD";
 852        case RES_MPT:
 853                return "MPT";
 854        case RES_MTT:
 855                return "MTT";
 856        case RES_MAC:
 857                return "MAC";
 858        case RES_VLAN:
 859                return "VLAN";
 860        case RES_COUNTER:
 861                return "COUNTER";
 862        case RES_FS_RULE:
 863                return "FS_RULE";
 864        case RES_EQ:
 865                return "EQ";
 866        default:
 867                return "INVALID RESOURCE";
 868        }
 869}
 870
 871static void *find_res(struct mlx4_dev *dev, u64 res_id,
 872                      enum mlx4_resource type)
 873{
 874        struct mlx4_priv *priv = mlx4_priv(dev);
 875
 876        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
 877                                  res_id);
 878}
 879
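/*
 * Look up a tracked resource and mark it busy on behalf of 'slave'.  Fails
 * if the resource does not exist, is already busy (the warning names the
 * function currently holding it) or is owned by another function.  The
 * caller must release the resource again with put_res().
 */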
 880static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
 881                    enum mlx4_resource type,
 882                    void *res, const char *func_name)
 883{
 884        struct res_common *r;
 885        int err = 0;
 886
 887        spin_lock_irq(mlx4_tlock(dev));
 888        r = find_res(dev, res_id, type);
 889        if (!r) {
 890                err = -ENONET;
 891                goto exit;
 892        }
 893
 894        if (r->state == RES_ANY_BUSY) {
 895                mlx4_warn(dev,
 896                          "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
 897                          func_name, slave, res_id, mlx4_resource_type_to_str(type),
 898                          r->func_name);
 899                err = -EBUSY;
 900                goto exit;
 901        }
 902
 903        if (r->owner != slave) {
 904                err = -EPERM;
 905                goto exit;
 906        }
 907
 908        r->from_state = r->state;
 909        r->state = RES_ANY_BUSY;
 910        r->func_name = func_name;
 911
 912        if (res)
 913                *((struct res_common **)res) = r;
 914
 915exit:
 916        spin_unlock_irq(mlx4_tlock(dev));
 917        return err;
 918}
 919
 920#define get_res(dev, slave, res_id, type, res) \
 921        _get_res((dev), (slave), (res_id), (type), (res), __func__)
 922
 923int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
 924                                    enum mlx4_resource type,
 925                                    u64 res_id, int *slave)
 926{
 928        struct res_common *r;
 929        int err = -ENOENT;
 930        int id = res_id;
 931
 932        if (type == RES_QP)
 933                id &= 0x7fffff;
 934        spin_lock(mlx4_tlock(dev));
 935
 936        r = find_res(dev, id, type);
 937        if (r) {
 938                *slave = r->owner;
 939                err = 0;
 940        }
 941        spin_unlock(mlx4_tlock(dev));
 942
 943        return err;
 944}
 945
 946static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
 947                    enum mlx4_resource type)
 948{
 949        struct res_common *r;
 950
 951        spin_lock_irq(mlx4_tlock(dev));
 952        r = find_res(dev, res_id, type);
 953        if (r) {
 954                r->state = r->from_state;
 955                r->func_name = "";
 956        }
 957        spin_unlock_irq(mlx4_tlock(dev));
 958}
 959
 960static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 961                             u64 in_param, u64 *out_param, int port);
 962
 963static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
 964                                   int counter_index)
 965{
 966        struct res_common *r;
 967        struct res_counter *counter;
 968        int ret = 0;
 969
 970        if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
 971                return ret;
 972
 973        spin_lock_irq(mlx4_tlock(dev));
 974        r = find_res(dev, counter_index, RES_COUNTER);
 975        if (!r || r->owner != slave) {
 976                ret = -EINVAL;
 977        } else {
 978                counter = container_of(r, struct res_counter, com);
 979                if (!counter->port)
 980                        counter->port = port;
 981        }
 982
 983        spin_unlock_irq(mlx4_tlock(dev));
 984        return ret;
 985}
 986
 987static int handle_unexisting_counter(struct mlx4_dev *dev,
 988                                     struct mlx4_qp_context *qpc, u8 slave,
 989                                     int port)
 990{
 991        struct mlx4_priv *priv = mlx4_priv(dev);
 992        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
 993        struct res_common *tmp;
 994        struct res_counter *counter;
 995        u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
 996        int err = 0;
 997
 998        spin_lock_irq(mlx4_tlock(dev));
 999        list_for_each_entry(tmp,
1000                            &tracker->slave_list[slave].res_list[RES_COUNTER],
1001                            list) {
1002                counter = container_of(tmp, struct res_counter, com);
1003                if (port == counter->port) {
1004                        qpc->pri_path.counter_index  = counter->com.res_id;
1005                        spin_unlock_irq(mlx4_tlock(dev));
1006                        return 0;
1007                }
1008        }
1009        spin_unlock_irq(mlx4_tlock(dev));
1010
1011        /* No existing counter, need to allocate a new counter */
1012        err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
1013                                port);
1014        if (err == -ENOENT) {
1015                err = 0;
1016        } else if (err && err != -ENOSPC) {
1017                mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
1018                         __func__, slave, err);
1019        } else {
1020                qpc->pri_path.counter_index = counter_idx;
1021                mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
1022                         __func__, slave, qpc->pri_path.counter_index);
1023                err = 0;
1024        }
1025
1026        return err;
1027}
1028
1029static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
1030                          u8 slave, int port)
1031{
1032        if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
1033                return handle_existing_counter(dev, slave, port,
1034                                               qpc->pri_path.counter_index);
1035
1036        return handle_unexisting_counter(dev, qpc, slave, port);
1037}
1038
1039static struct res_common *alloc_qp_tr(int id)
1040{
1041        struct res_qp *ret;
1042
1043        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1044        if (!ret)
1045                return NULL;
1046
1047        ret->com.res_id = id;
1048        ret->com.state = RES_QP_RESERVED;
1049        ret->local_qpn = id;
1050        INIT_LIST_HEAD(&ret->mcg_list);
1051        spin_lock_init(&ret->mcg_spl);
1052        atomic_set(&ret->ref_count, 0);
1053
1054        return &ret->com;
1055}
1056
1057static struct res_common *alloc_mtt_tr(int id, int order)
1058{
1059        struct res_mtt *ret;
1060
1061        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1062        if (!ret)
1063                return NULL;
1064
1065        ret->com.res_id = id;
1066        ret->order = order;
1067        ret->com.state = RES_MTT_ALLOCATED;
1068        atomic_set(&ret->ref_count, 0);
1069
1070        return &ret->com;
1071}
1072
1073static struct res_common *alloc_mpt_tr(int id, int key)
1074{
1075        struct res_mpt *ret;
1076
1077        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1078        if (!ret)
1079                return NULL;
1080
1081        ret->com.res_id = id;
1082        ret->com.state = RES_MPT_RESERVED;
1083        ret->key = key;
1084
1085        return &ret->com;
1086}
1087
1088static struct res_common *alloc_eq_tr(int id)
1089{
1090        struct res_eq *ret;
1091
1092        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1093        if (!ret)
1094                return NULL;
1095
1096        ret->com.res_id = id;
1097        ret->com.state = RES_EQ_RESERVED;
1098
1099        return &ret->com;
1100}
1101
1102static struct res_common *alloc_cq_tr(int id)
1103{
1104        struct res_cq *ret;
1105
1106        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1107        if (!ret)
1108                return NULL;
1109
1110        ret->com.res_id = id;
1111        ret->com.state = RES_CQ_ALLOCATED;
1112        atomic_set(&ret->ref_count, 0);
1113
1114        return &ret->com;
1115}
1116
1117static struct res_common *alloc_srq_tr(int id)
1118{
1119        struct res_srq *ret;
1120
1121        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1122        if (!ret)
1123                return NULL;
1124
1125        ret->com.res_id = id;
1126        ret->com.state = RES_SRQ_ALLOCATED;
1127        atomic_set(&ret->ref_count, 0);
1128
1129        return &ret->com;
1130}
1131
1132static struct res_common *alloc_counter_tr(int id, int port)
1133{
1134        struct res_counter *ret;
1135
1136        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1137        if (!ret)
1138                return NULL;
1139
1140        ret->com.res_id = id;
1141        ret->com.state = RES_COUNTER_ALLOCATED;
1142        ret->port = port;
1143
1144        return &ret->com;
1145}
1146
1147static struct res_common *alloc_xrcdn_tr(int id)
1148{
1149        struct res_xrcdn *ret;
1150
1151        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1152        if (!ret)
1153                return NULL;
1154
1155        ret->com.res_id = id;
1156        ret->com.state = RES_XRCD_ALLOCATED;
1157
1158        return &ret->com;
1159}
1160
1161static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1162{
1163        struct res_fs_rule *ret;
1164
1165        ret = kzalloc(sizeof(*ret), GFP_KERNEL);
1166        if (!ret)
1167                return NULL;
1168
1169        ret->com.res_id = id;
1170        ret->com.state = RES_FS_RULE_ALLOCATED;
1171        ret->qpn = qpn;
1172        return &ret->com;
1173}
1174
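/*
 * Allocate a tracker entry of the requested type on behalf of 'slave'.  The
 * meaning of 'extra' depends on the type: MPT key, MTT order, counter port
 * or flow-steering rule QPN.
 */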
1175static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1176                                   int extra)
1177{
1178        struct res_common *ret;
1179
1180        switch (type) {
1181        case RES_QP:
1182                ret = alloc_qp_tr(id);
1183                break;
1184        case RES_MPT:
1185                ret = alloc_mpt_tr(id, extra);
1186                break;
1187        case RES_MTT:
1188                ret = alloc_mtt_tr(id, extra);
1189                break;
1190        case RES_EQ:
1191                ret = alloc_eq_tr(id);
1192                break;
1193        case RES_CQ:
1194                ret = alloc_cq_tr(id);
1195                break;
1196        case RES_SRQ:
1197                ret = alloc_srq_tr(id);
1198                break;
1199        case RES_MAC:
1200                pr_err("implementation missing\n");
1201                return NULL;
1202        case RES_COUNTER:
1203                ret = alloc_counter_tr(id, extra);
1204                break;
1205        case RES_XRCD:
1206                ret = alloc_xrcdn_tr(id);
1207                break;
1208        case RES_FS_RULE:
1209                ret = alloc_fs_rule_tr(id, extra);
1210                break;
1211        default:
1212                return NULL;
1213        }
1214        if (ret)
1215                ret->owner = slave;
1216
1217        return ret;
1218}
1219
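/*
 * Sum the statistics of every counter owned by 'slave' on 'port' into
 * 'data'.  The counter IDs are snapshotted under the tracker lock so the
 * firmware queries can run unlocked; if a query fails, zeroed stats are
 * returned.
 */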
1220int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1221                          struct mlx4_counter *data)
1222{
1223        struct mlx4_priv *priv = mlx4_priv(dev);
1224        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1225        struct res_common *tmp;
1226        struct res_counter *counter;
1227        int *counters_arr;
1228        int i = 0, err = 0;
1229
1230        memset(data, 0, sizeof(*data));
1231
1232        counters_arr = kmalloc_array(dev->caps.max_counters,
1233                                     sizeof(*counters_arr), GFP_KERNEL);
1234        if (!counters_arr)
1235                return -ENOMEM;
1236
1237        spin_lock_irq(mlx4_tlock(dev));
1238        list_for_each_entry(tmp,
1239                            &tracker->slave_list[slave].res_list[RES_COUNTER],
1240                            list) {
1241                counter = container_of(tmp, struct res_counter, com);
1242                if (counter->port == port) {
1243                        counters_arr[i] = (int)tmp->res_id;
1244                        i++;
1245                }
1246        }
1247        spin_unlock_irq(mlx4_tlock(dev));
1248        counters_arr[i] = -1;
1249
1250        i = 0;
1251
1252        while (counters_arr[i] != -1) {
1253                err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1254                                             0);
1255                if (err) {
1256                        memset(data, 0, sizeof(*data));
1257                        goto table_changed;
1258                }
1259                i++;
1260        }
1261
1262table_changed:
1263        kfree(counters_arr);
1264        return 0;
1265}
1266
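/*
 * Register a contiguous range of resource IDs [base, base + count) to
 * 'slave': allocate a tracker entry for each ID, insert it into the
 * per-type rb-tree and the slave's resource list, and roll everything back
 * if any ID is already tracked.
 */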
1267static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1268                         enum mlx4_resource type, int extra)
1269{
1270        int i;
1271        int err;
1272        struct mlx4_priv *priv = mlx4_priv(dev);
1273        struct res_common **res_arr;
1274        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1275        struct rb_root *root = &tracker->res_tree[type];
1276
1277        res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
1278        if (!res_arr)
1279                return -ENOMEM;
1280
1281        for (i = 0; i < count; ++i) {
1282                res_arr[i] = alloc_tr(base + i, type, slave, extra);
1283                if (!res_arr[i]) {
1284                        for (--i; i >= 0; --i)
1285                                kfree(res_arr[i]);
1286
1287                        kfree(res_arr);
1288                        return -ENOMEM;
1289                }
1290        }
1291
1292        spin_lock_irq(mlx4_tlock(dev));
1293        for (i = 0; i < count; ++i) {
1294                if (find_res(dev, base + i, type)) {
1295                        err = -EEXIST;
1296                        goto undo;
1297                }
1298                err = res_tracker_insert(root, res_arr[i]);
1299                if (err)
1300                        goto undo;
1301                list_add_tail(&res_arr[i]->list,
1302                              &tracker->slave_list[slave].res_list[type]);
1303        }
1304        spin_unlock_irq(mlx4_tlock(dev));
1305        kfree(res_arr);
1306
1307        return 0;
1308
1309undo:
1310        for (--i; i >= 0; --i) {
1311                rb_erase(&res_arr[i]->node, root);
1312                list_del_init(&res_arr[i]->list);
1313        }
1314
1315        spin_unlock_irq(mlx4_tlock(dev));
1316
1317        for (i = 0; i < count; ++i)
1318                kfree(res_arr[i]);
1319
1320        kfree(res_arr);
1321
1322        return err;
1323}
1324
1325static int remove_qp_ok(struct res_qp *res)
1326{
1327        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1328            !list_empty(&res->mcg_list)) {
1329                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1330                       res->com.state, atomic_read(&res->ref_count));
1331                return -EBUSY;
1332        } else if (res->com.state != RES_QP_RESERVED) {
1333                return -EPERM;
1334        }
1335
1336        return 0;
1337}
1338
1339static int remove_mtt_ok(struct res_mtt *res, int order)
1340{
1341        if (res->com.state == RES_MTT_BUSY ||
1342            atomic_read(&res->ref_count)) {
1343                pr_devel("%s-%d: state %s, ref_count %d\n",
1344                         __func__, __LINE__,
1345                         mtt_states_str(res->com.state),
1346                         atomic_read(&res->ref_count));
1347                return -EBUSY;
1348        } else if (res->com.state != RES_MTT_ALLOCATED)
1349                return -EPERM;
1350        else if (res->order != order)
1351                return -EINVAL;
1352
1353        return 0;
1354}
1355
1356static int remove_mpt_ok(struct res_mpt *res)
1357{
1358        if (res->com.state == RES_MPT_BUSY)
1359                return -EBUSY;
1360        else if (res->com.state != RES_MPT_RESERVED)
1361                return -EPERM;
1362
1363        return 0;
1364}
1365
1366static int remove_eq_ok(struct res_eq *res)
1367{
 1368        if (res->com.state == RES_EQ_BUSY)
 1369                return -EBUSY;
 1370        else if (res->com.state != RES_EQ_RESERVED)
 1371                return -EPERM;
1372
1373        return 0;
1374}
1375
1376static int remove_counter_ok(struct res_counter *res)
1377{
1378        if (res->com.state == RES_COUNTER_BUSY)
1379                return -EBUSY;
1380        else if (res->com.state != RES_COUNTER_ALLOCATED)
1381                return -EPERM;
1382
1383        return 0;
1384}
1385
1386static int remove_xrcdn_ok(struct res_xrcdn *res)
1387{
1388        if (res->com.state == RES_XRCD_BUSY)
1389                return -EBUSY;
1390        else if (res->com.state != RES_XRCD_ALLOCATED)
1391                return -EPERM;
1392
1393        return 0;
1394}
1395
1396static int remove_fs_rule_ok(struct res_fs_rule *res)
1397{
1398        if (res->com.state == RES_FS_RULE_BUSY)
1399                return -EBUSY;
1400        else if (res->com.state != RES_FS_RULE_ALLOCATED)
1401                return -EPERM;
1402
1403        return 0;
1404}
1405
1406static int remove_cq_ok(struct res_cq *res)
1407{
1408        if (res->com.state == RES_CQ_BUSY)
1409                return -EBUSY;
1410        else if (res->com.state != RES_CQ_ALLOCATED)
1411                return -EPERM;
1412
1413        return 0;
1414}
1415
1416static int remove_srq_ok(struct res_srq *res)
1417{
1418        if (res->com.state == RES_SRQ_BUSY)
1419                return -EBUSY;
1420        else if (res->com.state != RES_SRQ_ALLOCATED)
1421                return -EPERM;
1422
1423        return 0;
1424}
1425
1426static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1427{
1428        switch (type) {
1429        case RES_QP:
1430                return remove_qp_ok((struct res_qp *)res);
1431        case RES_CQ:
1432                return remove_cq_ok((struct res_cq *)res);
1433        case RES_SRQ:
1434                return remove_srq_ok((struct res_srq *)res);
1435        case RES_MPT:
1436                return remove_mpt_ok((struct res_mpt *)res);
1437        case RES_MTT:
1438                return remove_mtt_ok((struct res_mtt *)res, extra);
1439        case RES_MAC:
1440                return -EOPNOTSUPP;
1441        case RES_EQ:
1442                return remove_eq_ok((struct res_eq *)res);
1443        case RES_COUNTER:
1444                return remove_counter_ok((struct res_counter *)res);
1445        case RES_XRCD:
1446                return remove_xrcdn_ok((struct res_xrcdn *)res);
1447        case RES_FS_RULE:
1448                return remove_fs_rule_ok((struct res_fs_rule *)res);
1449        default:
1450                return -EINVAL;
1451        }
1452}
1453
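/* Remove a contiguous range of tracked resources [base, base + count) of the
 * given type from @slave.  All entries are first checked with remove_ok()
 * under the tracker lock; only if every one of them is removable are they
 * erased from the rb-tree and freed, so the operation is all-or-nothing.
 */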
1454static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1455                         enum mlx4_resource type, int extra)
1456{
1457        u64 i;
1458        int err;
1459        struct mlx4_priv *priv = mlx4_priv(dev);
1460        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1461        struct res_common *r;
1462
1463        spin_lock_irq(mlx4_tlock(dev));
1464        for (i = base; i < base + count; ++i) {
1465                r = res_tracker_lookup(&tracker->res_tree[type], i);
1466                if (!r) {
1467                        err = -ENOENT;
1468                        goto out;
1469                }
1470                if (r->owner != slave) {
1471                        err = -EPERM;
1472                        goto out;
1473                }
1474                err = remove_ok(r, type, extra);
1475                if (err)
1476                        goto out;
1477        }
1478
1479        for (i = base; i < base + count; ++i) {
1480                r = res_tracker_lookup(&tracker->res_tree[type], i);
1481                rb_erase(&r->node, &tracker->res_tree[type]);
1482                list_del(&r->list);
1483                kfree(r);
1484        }
1485        err = 0;
1486
1487out:
1488        spin_unlock_irq(mlx4_tlock(dev));
1489
1490        return err;
1491}
1492
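/* The *_res_start_move_to() helpers below implement the per-resource state
 * machine: they validate that the requested transition is legal for a
 * resource owned by @slave, record from_state/to_state and park the entry in
 * the BUSY state.  The caller must later commit the move with res_end_move()
 * or roll it back with res_abort_move().
 */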
1493static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1494                                enum res_qp_states state, struct res_qp **qp,
1495                                int alloc)
1496{
1497        struct mlx4_priv *priv = mlx4_priv(dev);
1498        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1499        struct res_qp *r;
1500        int err = 0;
1501
1502        spin_lock_irq(mlx4_tlock(dev));
1503        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1504        if (!r)
1505                err = -ENOENT;
1506        else if (r->com.owner != slave)
1507                err = -EPERM;
1508        else {
1509                switch (state) {
1510                case RES_QP_BUSY:
1511                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1512                                 __func__, r->com.res_id);
1513                        err = -EBUSY;
1514                        break;
1515
1516                case RES_QP_RESERVED:
1517                        if (r->com.state == RES_QP_MAPPED && !alloc)
1518                                break;
1519
1520                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1521                        err = -EINVAL;
1522                        break;
1523
1524                case RES_QP_MAPPED:
1525                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
1526                            r->com.state == RES_QP_HW)
1527                                break;
1528                        else {
1529                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1530                                          r->com.res_id);
1531                                err = -EINVAL;
1532                        }
1533
1534                        break;
1535
1536                case RES_QP_HW:
1537                        if (r->com.state != RES_QP_MAPPED)
1538                                err = -EINVAL;
1539                        break;
1540                default:
1541                        err = -EINVAL;
1542                }
1543
1544                if (!err) {
1545                        r->com.from_state = r->com.state;
1546                        r->com.to_state = state;
1547                        r->com.state = RES_QP_BUSY;
1548                        if (qp)
1549                                *qp = r;
1550                }
1551        }
1552
1553        spin_unlock_irq(mlx4_tlock(dev));
1554
1555        return err;
1556}
1557
1558static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1559                                enum res_mpt_states state, struct res_mpt **mpt)
1560{
1561        struct mlx4_priv *priv = mlx4_priv(dev);
1562        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1563        struct res_mpt *r;
1564        int err = 0;
1565
1566        spin_lock_irq(mlx4_tlock(dev));
1567        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1568        if (!r)
1569                err = -ENOENT;
1570        else if (r->com.owner != slave)
1571                err = -EPERM;
1572        else {
1573                switch (state) {
1574                case RES_MPT_BUSY:
1575                        err = -EINVAL;
1576                        break;
1577
1578                case RES_MPT_RESERVED:
1579                        if (r->com.state != RES_MPT_MAPPED)
1580                                err = -EINVAL;
1581                        break;
1582
1583                case RES_MPT_MAPPED:
1584                        if (r->com.state != RES_MPT_RESERVED &&
1585                            r->com.state != RES_MPT_HW)
1586                                err = -EINVAL;
1587                        break;
1588
1589                case RES_MPT_HW:
1590                        if (r->com.state != RES_MPT_MAPPED)
1591                                err = -EINVAL;
1592                        break;
1593                default:
1594                        err = -EINVAL;
1595                }
1596
1597                if (!err) {
1598                        r->com.from_state = r->com.state;
1599                        r->com.to_state = state;
1600                        r->com.state = RES_MPT_BUSY;
1601                        if (mpt)
1602                                *mpt = r;
1603                }
1604        }
1605
1606        spin_unlock_irq(mlx4_tlock(dev));
1607
1608        return err;
1609}
1610
1611static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1612                                enum res_eq_states state, struct res_eq **eq)
1613{
1614        struct mlx4_priv *priv = mlx4_priv(dev);
1615        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1616        struct res_eq *r;
1617        int err = 0;
1618
1619        spin_lock_irq(mlx4_tlock(dev));
1620        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1621        if (!r)
1622                err = -ENOENT;
1623        else if (r->com.owner != slave)
1624                err = -EPERM;
1625        else {
1626                switch (state) {
1627                case RES_EQ_BUSY:
1628                        err = -EINVAL;
1629                        break;
1630
1631                case RES_EQ_RESERVED:
1632                        if (r->com.state != RES_EQ_HW)
1633                                err = -EINVAL;
1634                        break;
1635
1636                case RES_EQ_HW:
1637                        if (r->com.state != RES_EQ_RESERVED)
1638                                err = -EINVAL;
1639                        break;
1640
1641                default:
1642                        err = -EINVAL;
1643                }
1644
1645                if (!err) {
1646                        r->com.from_state = r->com.state;
1647                        r->com.to_state = state;
1648                        r->com.state = RES_EQ_BUSY;
1649                }
1650        }
1651
1652        spin_unlock_irq(mlx4_tlock(dev));
1653
1654        if (!err && eq)
1655                *eq = r;
1656
1657        return err;
1658}
1659
1660static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1661                                enum res_cq_states state, struct res_cq **cq)
1662{
1663        struct mlx4_priv *priv = mlx4_priv(dev);
1664        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1665        struct res_cq *r;
1666        int err;
1667
1668        spin_lock_irq(mlx4_tlock(dev));
1669        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1670        if (!r) {
1671                err = -ENOENT;
1672        } else if (r->com.owner != slave) {
1673                err = -EPERM;
1674        } else if (state == RES_CQ_ALLOCATED) {
1675                if (r->com.state != RES_CQ_HW)
1676                        err = -EINVAL;
1677                else if (atomic_read(&r->ref_count))
1678                        err = -EBUSY;
1679                else
1680                        err = 0;
1681        } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1682                err = -EINVAL;
1683        } else {
1684                err = 0;
1685        }
1686
1687        if (!err) {
1688                r->com.from_state = r->com.state;
1689                r->com.to_state = state;
1690                r->com.state = RES_CQ_BUSY;
1691                if (cq)
1692                        *cq = r;
1693        }
1694
1695        spin_unlock_irq(mlx4_tlock(dev));
1696
1697        return err;
1698}
1699
1700static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1701                                 enum res_srq_states state, struct res_srq **srq)
1702{
1703        struct mlx4_priv *priv = mlx4_priv(dev);
1704        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1705        struct res_srq *r;
1706        int err = 0;
1707
1708        spin_lock_irq(mlx4_tlock(dev));
1709        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1710        if (!r) {
1711                err = -ENOENT;
1712        } else if (r->com.owner != slave) {
1713                err = -EPERM;
1714        } else if (state == RES_SRQ_ALLOCATED) {
1715                if (r->com.state != RES_SRQ_HW)
1716                        err = -EINVAL;
1717                else if (atomic_read(&r->ref_count))
1718                        err = -EBUSY;
1719        } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1720                err = -EINVAL;
1721        }
1722
1723        if (!err) {
1724                r->com.from_state = r->com.state;
1725                r->com.to_state = state;
1726                r->com.state = RES_SRQ_BUSY;
1727                if (srq)
1728                        *srq = r;
1729        }
1730
1731        spin_unlock_irq(mlx4_tlock(dev));
1732
1733        return err;
1734}
1735
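/* Roll back (res_abort_move) or commit (res_end_move) a transition that was
 * started by one of the *_res_start_move_to() helpers above.
 */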
1736static void res_abort_move(struct mlx4_dev *dev, int slave,
1737                           enum mlx4_resource type, int id)
1738{
1739        struct mlx4_priv *priv = mlx4_priv(dev);
1740        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1741        struct res_common *r;
1742
1743        spin_lock_irq(mlx4_tlock(dev));
1744        r = res_tracker_lookup(&tracker->res_tree[type], id);
1745        if (r && (r->owner == slave))
1746                r->state = r->from_state;
1747        spin_unlock_irq(mlx4_tlock(dev));
1748}
1749
1750static void res_end_move(struct mlx4_dev *dev, int slave,
1751                         enum mlx4_resource type, int id)
1752{
1753        struct mlx4_priv *priv = mlx4_priv(dev);
1754        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1755        struct res_common *r;
1756
1757        spin_lock_irq(mlx4_tlock(dev));
1758        r = res_tracker_lookup(&tracker->res_tree[type], id);
1759        if (r && (r->owner == slave))
1760                r->state = r->to_state;
1761        spin_unlock_irq(mlx4_tlock(dev));
1762}
1763
1764static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1765{
1766        return mlx4_is_qp_reserved(dev, qpn) &&
1767                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1768}
1769
1770static int fw_reserved(struct mlx4_dev *dev, int qpn)
1771{
1772        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1773}
1774
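/* ALLOC_RES handler for QPs.  For RES_OP_RESERVE, in_param encodes, as
 * parsed below:
 *   low dword, bits  0..23 - number of QPs to reserve
 *   low dword, bits 24..31 - allocation flags (masked by
 *                            dev->caps.alloc_res_qp_mask)
 *   high dword             - required alignment
 * RES_OP_MAP_ICM maps ICM for a single, already reserved QP number.
 */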
1775static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1776                        u64 in_param, u64 *out_param)
1777{
1778        int err;
1779        int count;
1780        int align;
1781        int base;
1782        int qpn;
1783        u8 flags;
1784
1785        switch (op) {
1786        case RES_OP_RESERVE:
1787                count = get_param_l(&in_param) & 0xffffff;
1788                /* Turn off all unsupported QP allocation flags that the
1789                 * slave tries to set.
1790                 */
1791                flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1792                align = get_param_h(&in_param);
1793                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1794                if (err)
1795                        return err;
1796
1797                err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1798                if (err) {
1799                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
1800                        return err;
1801                }
1802
1803                err = add_res_range(dev, slave, base, count, RES_QP, 0);
1804                if (err) {
1805                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
1806                        __mlx4_qp_release_range(dev, base, count);
1807                        return err;
1808                }
1809                set_param_l(out_param, base);
1810                break;
1811        case RES_OP_MAP_ICM:
1812                qpn = get_param_l(&in_param) & 0x7fffff;
1813                if (valid_reserved(dev, slave, qpn)) {
1814                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1815                        if (err)
1816                                return err;
1817                }
1818
1819                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1820                                           NULL, 1);
1821                if (err)
1822                        return err;
1823
1824                if (!fw_reserved(dev, qpn)) {
1825                        err = __mlx4_qp_alloc_icm(dev, qpn);
1826                        if (err) {
1827                                res_abort_move(dev, slave, RES_QP, qpn);
1828                                return err;
1829                        }
1830                }
1831
1832                res_end_move(dev, slave, RES_QP, qpn);
1833                break;
1834
1835        default:
1836                err = -EINVAL;
1837                break;
1838        }
1839        return err;
1840}
1841
1842static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1843                         u64 in_param, u64 *out_param)
1844{
1845        int err = -EINVAL;
1846        int base;
1847        int order;
1848
1849        if (op != RES_OP_RESERVE_AND_MAP)
1850                return err;
1851
1852        order = get_param_l(&in_param);
1853
1854        err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1855        if (err)
1856                return err;
1857
1858        base = __mlx4_alloc_mtt_range(dev, order);
1859        if (base == -1) {
1860                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1861                return -ENOMEM;
1862        }
1863
1864        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1865        if (err) {
1866                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1867                __mlx4_free_mtt_range(dev, base, order);
1868        } else {
1869                set_param_l(out_param, base);
1870        }
1871
1872        return err;
1873}
1874
1875static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1876                         u64 in_param, u64 *out_param)
1877{
1878        int err = -EINVAL;
1879        int index;
1880        int id;
1881        struct res_mpt *mpt;
1882
1883        switch (op) {
1884        case RES_OP_RESERVE:
1885                err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1886                if (err)
1887                        break;
1888
1889                index = __mlx4_mpt_reserve(dev);
1890                if (index == -1) {
1891                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1892                        break;
1893                }
1894                id = index & mpt_mask(dev);
1895
1896                err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1897                if (err) {
1898                        mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1899                        __mlx4_mpt_release(dev, index);
1900                        break;
1901                }
1902                set_param_l(out_param, index);
1903                break;
1904        case RES_OP_MAP_ICM:
1905                index = get_param_l(&in_param);
1906                id = index & mpt_mask(dev);
1907                err = mr_res_start_move_to(dev, slave, id,
1908                                           RES_MPT_MAPPED, &mpt);
1909                if (err)
1910                        return err;
1911
1912                err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1913                if (err) {
1914                        res_abort_move(dev, slave, RES_MPT, id);
1915                        return err;
1916                }
1917
1918                res_end_move(dev, slave, RES_MPT, id);
1919                break;
1920        }
1921        return err;
1922}
1923
1924static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1925                        u64 in_param, u64 *out_param)
1926{
1927        int cqn;
1928        int err;
1929
1930        switch (op) {
1931        case RES_OP_RESERVE_AND_MAP:
1932                err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1933                if (err)
1934                        break;
1935
1936                err = __mlx4_cq_alloc_icm(dev, &cqn);
1937                if (err) {
1938                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1939                        break;
1940                }
1941
1942                err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1943                if (err) {
1944                        mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1945                        __mlx4_cq_free_icm(dev, cqn);
1946                        break;
1947                }
1948
1949                set_param_l(out_param, cqn);
1950                break;
1951
1952        default:
1953                err = -EINVAL;
1954        }
1955
1956        return err;
1957}
1958
1959static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1960                         u64 in_param, u64 *out_param)
1961{
1962        int srqn;
1963        int err;
1964
1965        switch (op) {
1966        case RES_OP_RESERVE_AND_MAP:
1967                err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1968                if (err)
1969                        break;
1970
1971                err = __mlx4_srq_alloc_icm(dev, &srqn);
1972                if (err) {
1973                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1974                        break;
1975                }
1976
1977                err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1978                if (err) {
1979                        mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1980                        __mlx4_srq_free_icm(dev, srqn);
1981                        break;
1982                }
1983
1984                set_param_l(out_param, srqn);
1985                break;
1986
1987        default:
1988                err = -EINVAL;
1989        }
1990
1991        return err;
1992}
1993
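/* Per-slave MAC bookkeeping: each MAC registered on behalf of a slave is kept
 * on its RES_MAC list as a struct mac_res with a reference count, so repeated
 * registrations of the same MAC/port only bump the count and the quota is
 * charged once; mac_del_from_slave() returns the quota when the count drops
 * to zero.
 */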
1994static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1995                                     u8 smac_index, u64 *mac)
1996{
1997        struct mlx4_priv *priv = mlx4_priv(dev);
1998        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1999        struct list_head *mac_list =
2000                &tracker->slave_list[slave].res_list[RES_MAC];
2001        struct mac_res *res, *tmp;
2002
2003        list_for_each_entry_safe(res, tmp, mac_list, list) {
2004                if (res->smac_index == smac_index && res->port == (u8) port) {
2005                        *mac = res->mac;
2006                        return 0;
2007                }
2008        }
2009        return -ENOENT;
2010}
2011
2012static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
2013{
2014        struct mlx4_priv *priv = mlx4_priv(dev);
2015        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2016        struct list_head *mac_list =
2017                &tracker->slave_list[slave].res_list[RES_MAC];
2018        struct mac_res *res, *tmp;
2019
2020        list_for_each_entry_safe(res, tmp, mac_list, list) {
2021                if (res->mac == mac && res->port == (u8) port) {
2022                        /* mac found. update ref count */
2023                        ++res->ref_count;
2024                        return 0;
2025                }
2026        }
2027
2028        if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
2029                return -EINVAL;
2030        res = kzalloc(sizeof(*res), GFP_KERNEL);
2031        if (!res) {
2032                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2033                return -ENOMEM;
2034        }
2035        res->mac = mac;
2036        res->port = (u8) port;
2037        res->smac_index = smac_index;
2038        res->ref_count = 1;
2039        list_add_tail(&res->list,
2040                      &tracker->slave_list[slave].res_list[RES_MAC]);
2041        return 0;
2042}
2043
2044static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2045                               int port)
2046{
2047        struct mlx4_priv *priv = mlx4_priv(dev);
2048        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2049        struct list_head *mac_list =
2050                &tracker->slave_list[slave].res_list[RES_MAC];
2051        struct mac_res *res, *tmp;
2052
2053        list_for_each_entry_safe(res, tmp, mac_list, list) {
2054                if (res->mac == mac && res->port == (u8) port) {
2055                        if (!--res->ref_count) {
2056                                list_del(&res->list);
2057                                mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2058                                kfree(res);
2059                        }
2060                        break;
2061                }
2062        }
2063}
2064
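/* Called when a slave is cleaned up: unregister every MAC it still holds,
 * once per outstanding reference, and give the quota back.
 */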
2065static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2066{
2067        struct mlx4_priv *priv = mlx4_priv(dev);
2068        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2069        struct list_head *mac_list =
2070                &tracker->slave_list[slave].res_list[RES_MAC];
2071        struct mac_res *res, *tmp;
2072        int i;
2073
2074        list_for_each_entry_safe(res, tmp, mac_list, list) {
2075                list_del(&res->list);
2076                /* dereference the mac the number of times the slave referenced it */
2077                for (i = 0; i < res->ref_count; i++)
2078                        __mlx4_unregister_mac(dev, res->port, res->mac);
2079                mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2080                kfree(res);
2081        }
2082}
2083
2084static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2085                         u64 in_param, u64 *out_param, int in_port)
2086{
2087        int err = -EINVAL;
2088        int port;
2089        u64 mac;
2090        u8 smac_index;
2091
2092        if (op != RES_OP_RESERVE_AND_MAP)
2093                return err;
2094
2095        port = !in_port ? get_param_l(out_param) : in_port;
2096        port = mlx4_slave_convert_port(
2097                        dev, slave, port);
2098
2099        if (port < 0)
2100                return -EINVAL;
2101        mac = in_param;
2102
2103        err = __mlx4_register_mac(dev, port, mac);
2104        if (err >= 0) {
2105                smac_index = err;
2106                set_param_l(out_param, err);
2107                err = 0;
2108        }
2109
2110        if (!err) {
2111                err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2112                if (err)
2113                        __mlx4_unregister_mac(dev, port, mac);
2114        }
2115        return err;
2116}
2117
2118static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2119                             int port, int vlan_index)
2120{
2121        struct mlx4_priv *priv = mlx4_priv(dev);
2122        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2123        struct list_head *vlan_list =
2124                &tracker->slave_list[slave].res_list[RES_VLAN];
2125        struct vlan_res *res, *tmp;
2126
2127        list_for_each_entry_safe(res, tmp, vlan_list, list) {
2128                if (res->vlan == vlan && res->port == (u8) port) {
2129                        /* vlan found. update ref count */
2130                        ++res->ref_count;
2131                        return 0;
2132                }
2133        }
2134
2135        if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2136                return -EINVAL;
2137        res = kzalloc(sizeof(*res), GFP_KERNEL);
2138        if (!res) {
2139                mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2140                return -ENOMEM;
2141        }
2142        res->vlan = vlan;
2143        res->port = (u8) port;
2144        res->vlan_index = vlan_index;
2145        res->ref_count = 1;
2146        list_add_tail(&res->list,
2147                      &tracker->slave_list[slave].res_list[RES_VLAN]);
2148        return 0;
2149}
2150
2151
2152static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2153                                int port)
2154{
2155        struct mlx4_priv *priv = mlx4_priv(dev);
2156        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2157        struct list_head *vlan_list =
2158                &tracker->slave_list[slave].res_list[RES_VLAN];
2159        struct vlan_res *res, *tmp;
2160
2161        list_for_each_entry_safe(res, tmp, vlan_list, list) {
2162                if (res->vlan == vlan && res->port == (u8) port) {
2163                        if (!--res->ref_count) {
2164                                list_del(&res->list);
2165                                mlx4_release_resource(dev, slave, RES_VLAN,
2166                                                      1, port);
2167                                kfree(res);
2168                        }
2169                        break;
2170                }
2171        }
2172}
2173
2174static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2175{
2176        struct mlx4_priv *priv = mlx4_priv(dev);
2177        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2178        struct list_head *vlan_list =
2179                &tracker->slave_list[slave].res_list[RES_VLAN];
2180        struct vlan_res *res, *tmp;
2181        int i;
2182
2183        list_for_each_entry_safe(res, tmp, vlan_list, list) {
2184                list_del(&res->list);
2185                /* dereference the vlan the number of times the slave referenced it */
2186                for (i = 0; i < res->ref_count; i++)
2187                        __mlx4_unregister_vlan(dev, res->port, res->vlan);
2188                mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2189                kfree(res);
2190        }
2191}
2192
2193static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2194                          u64 in_param, u64 *out_param, int in_port)
2195{
2196        struct mlx4_priv *priv = mlx4_priv(dev);
2197        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2198        int err;
2199        u16 vlan;
2200        int vlan_index;
2201        int port;
2202
2203        port = !in_port ? get_param_l(out_param) : in_port;
2204
2205        if (!port || op != RES_OP_RESERVE_AND_MAP)
2206                return -EINVAL;
2207
2208        port = mlx4_slave_convert_port(
2209                        dev, slave, port);
2210
2211        if (port < 0)
2212                return -EINVAL;
2213        /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2214        if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2215                slave_state[slave].old_vlan_api = true;
2216                return 0;
2217        }
2218
2219        vlan = (u16) in_param;
2220
2221        err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2222        if (!err) {
2223                set_param_l(out_param, (u32) vlan_index);
2224                err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2225                if (err)
2226                        __mlx4_unregister_vlan(dev, port, vlan);
2227        }
2228        return err;
2229}
2230
2231static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2232                             u64 in_param, u64 *out_param, int port)
2233{
2234        u32 index;
2235        int err;
2236
2237        if (op != RES_OP_RESERVE)
2238                return -EINVAL;
2239
2240        err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2241        if (err)
2242                return err;
2243
2244        err = __mlx4_counter_alloc(dev, &index);
2245        if (err) {
2246                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2247                return err;
2248        }
2249
2250        err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2251        if (err) {
2252                __mlx4_counter_free(dev, index);
2253                mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2254        } else {
2255                set_param_l(out_param, index);
2256        }
2257
2258        return err;
2259}
2260
2261static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2262                           u64 in_param, u64 *out_param)
2263{
2264        u32 xrcdn;
2265        int err;
2266
2267        if (op != RES_OP_RESERVE)
2268                return -EINVAL;
2269
2270        err = __mlx4_xrcd_alloc(dev, &xrcdn);
2271        if (err)
2272                return err;
2273
2274        err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2275        if (err)
2276                __mlx4_xrcd_free(dev, xrcdn);
2277        else
2278                set_param_l(out_param, xrcdn);
2279
2280        return err;
2281}
2282
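/* Dispatcher for the ALLOC_RES command issued by slaves.  The resource type
 * is carried in the low byte of vhcr->in_modifier, the port (for MAC/VLAN) in
 * bits 8..15, and the allocation op (RES_OP_*) in vhcr->op_modifier; the
 * allocated index or base is returned through vhcr->out_param.
 */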
2283int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2284                           struct mlx4_vhcr *vhcr,
2285                           struct mlx4_cmd_mailbox *inbox,
2286                           struct mlx4_cmd_mailbox *outbox,
2287                           struct mlx4_cmd_info *cmd)
2288{
2289        int err;
2290        int alop = vhcr->op_modifier;
2291
2292        switch (vhcr->in_modifier & 0xFF) {
2293        case RES_QP:
2294                err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2295                                   vhcr->in_param, &vhcr->out_param);
2296                break;
2297
2298        case RES_MTT:
2299                err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2300                                    vhcr->in_param, &vhcr->out_param);
2301                break;
2302
2303        case RES_MPT:
2304                err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2305                                    vhcr->in_param, &vhcr->out_param);
2306                break;
2307
2308        case RES_CQ:
2309                err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2310                                   vhcr->in_param, &vhcr->out_param);
2311                break;
2312
2313        case RES_SRQ:
2314                err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2315                                    vhcr->in_param, &vhcr->out_param);
2316                break;
2317
2318        case RES_MAC:
2319                err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2320                                    vhcr->in_param, &vhcr->out_param,
2321                                    (vhcr->in_modifier >> 8) & 0xFF);
2322                break;
2323
2324        case RES_VLAN:
2325                err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2326                                     vhcr->in_param, &vhcr->out_param,
2327                                     (vhcr->in_modifier >> 8) & 0xFF);
2328                break;
2329
2330        case RES_COUNTER:
2331                err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2332                                        vhcr->in_param, &vhcr->out_param, 0);
2333                break;
2334
2335        case RES_XRCD:
2336                err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2337                                      vhcr->in_param, &vhcr->out_param);
2338                break;
2339
2340        default:
2341                err = -EINVAL;
2342                break;
2343        }
2344
2345        return err;
2346}
2347
2348static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2349                       u64 in_param)
2350{
2351        int err;
2352        int count;
2353        int base;
2354        int qpn;
2355
2356        switch (op) {
2357        case RES_OP_RESERVE:
2358                base = get_param_l(&in_param) & 0x7fffff;
2359                count = get_param_h(&in_param);
2360                err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2361                if (err)
2362                        break;
2363                mlx4_release_resource(dev, slave, RES_QP, count, 0);
2364                __mlx4_qp_release_range(dev, base, count);
2365                break;
2366        case RES_OP_MAP_ICM:
2367                qpn = get_param_l(&in_param) & 0x7fffff;
2368                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2369                                           NULL, 0);
2370                if (err)
2371                        return err;
2372
2373                if (!fw_reserved(dev, qpn))
2374                        __mlx4_qp_free_icm(dev, qpn);
2375
2376                res_end_move(dev, slave, RES_QP, qpn);
2377
2378                if (valid_reserved(dev, slave, qpn))
2379                        err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2380                break;
2381        default:
2382                err = -EINVAL;
2383                break;
2384        }
2385        return err;
2386}
2387
2388static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2389                        u64 in_param, u64 *out_param)
2390{
2391        int err = -EINVAL;
2392        int base;
2393        int order;
2394
2395        if (op != RES_OP_RESERVE_AND_MAP)
2396                return err;
2397
2398        base = get_param_l(&in_param);
2399        order = get_param_h(&in_param);
2400        err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2401        if (!err) {
2402                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2403                __mlx4_free_mtt_range(dev, base, order);
2404        }
2405        return err;
2406}
2407
2408static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2409                        u64 in_param)
2410{
2411        int err = -EINVAL;
2412        int index;
2413        int id;
2414        struct res_mpt *mpt;
2415
2416        switch (op) {
2417        case RES_OP_RESERVE:
2418                index = get_param_l(&in_param);
2419                id = index & mpt_mask(dev);
2420                err = get_res(dev, slave, id, RES_MPT, &mpt);
2421                if (err)
2422                        break;
2423                index = mpt->key;
2424                put_res(dev, slave, id, RES_MPT);
2425
2426                err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2427                if (err)
2428                        break;
2429                mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2430                __mlx4_mpt_release(dev, index);
2431                break;
2432        case RES_OP_MAP_ICM:
2433                index = get_param_l(&in_param);
2434                id = index & mpt_mask(dev);
2435                err = mr_res_start_move_to(dev, slave, id,
2436                                           RES_MPT_RESERVED, &mpt);
2437                if (err)
2438                        return err;
2439
2440                __mlx4_mpt_free_icm(dev, mpt->key);
2441                res_end_move(dev, slave, RES_MPT, id);
2442                break;
2443        default:
2444                err = -EINVAL;
2445                break;
2446        }
2447        return err;
2448}
2449
2450static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2451                       u64 in_param, u64 *out_param)
2452{
2453        int cqn;
2454        int err;
2455
2456        switch (op) {
2457        case RES_OP_RESERVE_AND_MAP:
2458                cqn = get_param_l(&in_param);
2459                err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2460                if (err)
2461                        break;
2462
2463                mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2464                __mlx4_cq_free_icm(dev, cqn);
2465                break;
2466
2467        default:
2468                err = -EINVAL;
2469                break;
2470        }
2471
2472        return err;
2473}
2474
2475static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2476                        u64 in_param, u64 *out_param)
2477{
2478        int srqn;
2479        int err;
2480
2481        switch (op) {
2482        case RES_OP_RESERVE_AND_MAP:
2483                srqn = get_param_l(&in_param);
2484                err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2485                if (err)
2486                        break;
2487
2488                mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2489                __mlx4_srq_free_icm(dev, srqn);
2490                break;
2491
2492        default:
2493                err = -EINVAL;
2494                break;
2495        }
2496
2497        return err;
2498}
2499
2500static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2501                            u64 in_param, u64 *out_param, int in_port)
2502{
2503        int port;
2504        int err = 0;
2505
2506        switch (op) {
2507        case RES_OP_RESERVE_AND_MAP:
2508                port = !in_port ? get_param_l(out_param) : in_port;
2509                port = mlx4_slave_convert_port(
2510                                dev, slave, port);
2511
2512                if (port < 0)
2513                        return -EINVAL;
2514                mac_del_from_slave(dev, slave, in_param, port);
2515                __mlx4_unregister_mac(dev, port, in_param);
2516                break;
2517        default:
2518                err = -EINVAL;
2519                break;
2520        }
2521
2522        return err;
2523
2524}
2525
2526static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2527                            u64 in_param, u64 *out_param, int port)
2528{
2529        struct mlx4_priv *priv = mlx4_priv(dev);
2530        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2531        int err = 0;
2532
2533        port = mlx4_slave_convert_port(
2534                        dev, slave, port);
2535
2536        if (port < 0)
2537                return -EINVAL;
2538        switch (op) {
2539        case RES_OP_RESERVE_AND_MAP:
2540                if (slave_state[slave].old_vlan_api)
2541                        return 0;
2542                if (!port)
2543                        return -EINVAL;
2544                vlan_del_from_slave(dev, slave, in_param, port);
2545                __mlx4_unregister_vlan(dev, port, in_param);
2546                break;
2547        default:
2548                err = -EINVAL;
2549                break;
2550        }
2551
2552        return err;
2553}
2554
2555static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2556                            u64 in_param, u64 *out_param)
2557{
2558        int index;
2559        int err;
2560
2561        if (op != RES_OP_RESERVE)
2562                return -EINVAL;
2563
2564        index = get_param_l(&in_param);
2565        if (index == MLX4_SINK_COUNTER_INDEX(dev))
2566                return 0;
2567
2568        err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2569        if (err)
2570                return err;
2571
2572        __mlx4_counter_free(dev, index);
2573        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2574
2575        return err;
2576}
2577
2578static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2579                          u64 in_param, u64 *out_param)
2580{
2581        int xrcdn;
2582        int err;
2583
2584        if (op != RES_OP_RESERVE)
2585                return -EINVAL;
2586
2587        xrcdn = get_param_l(&in_param);
2588        err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2589        if (err)
2590                return err;
2591
2592        __mlx4_xrcd_free(dev, xrcdn);
2593
2594        return err;
2595}
2596
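/* Dispatcher for the FREE_RES command; the in_modifier/op_modifier encoding
 * mirrors mlx4_ALLOC_RES_wrapper().  Each handler removes the entry from the
 * resource tracker, returns the slave's quota and frees the underlying HW or
 * ICM resource.
 */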
2597int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2598                          struct mlx4_vhcr *vhcr,
2599                          struct mlx4_cmd_mailbox *inbox,
2600                          struct mlx4_cmd_mailbox *outbox,
2601                          struct mlx4_cmd_info *cmd)
2602{
2603        int err = -EINVAL;
2604        int alop = vhcr->op_modifier;
2605
2606        switch (vhcr->in_modifier & 0xFF) {
2607        case RES_QP:
2608                err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2609                                  vhcr->in_param);
2610                break;
2611
2612        case RES_MTT:
2613                err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2614                                   vhcr->in_param, &vhcr->out_param);
2615                break;
2616
2617        case RES_MPT:
2618                err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2619                                   vhcr->in_param);
2620                break;
2621
2622        case RES_CQ:
2623                err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2624                                  vhcr->in_param, &vhcr->out_param);
2625                break;
2626
2627        case RES_SRQ:
2628                err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2629                                   vhcr->in_param, &vhcr->out_param);
2630                break;
2631
2632        case RES_MAC:
2633                err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2634                                   vhcr->in_param, &vhcr->out_param,
2635                                   (vhcr->in_modifier >> 8) & 0xFF);
2636                break;
2637
2638        case RES_VLAN:
2639                err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2640                                    vhcr->in_param, &vhcr->out_param,
2641                                    (vhcr->in_modifier >> 8) & 0xFF);
2642                break;
2643
2644        case RES_COUNTER:
2645                err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2646                                       vhcr->in_param, &vhcr->out_param);
2647                break;
2648
2649        case RES_XRCD:
2650                err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2651                                     vhcr->in_param, &vhcr->out_param);
2652                break;
2653        default:
2654                break;
2655        }
2656        return err;
2657}
2658
2659/* ugly but other choices are uglier */
2660static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2661{
2662        return (be32_to_cpu(mpt->flags) >> 9) & 1;
2663}
2664
2665static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2666{
2667        return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2668}
2669
2670static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2671{
2672        return be32_to_cpu(mpt->mtt_sz);
2673}
2674
2675static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2676{
2677        return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2678}
2679
2680static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2681{
2682        return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2683}
2684
2685static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2686{
2687        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2688}
2689
2690static int mr_is_region(struct mlx4_mpt_entry *mpt)
2691{
2692        return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2693}
2694
2695static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2696{
2697        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2698}
2699
2700static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2701{
2702        return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2703}
2704
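/* Number of MTT entries (pages) needed to back a QP, derived from its
 * context: the SQ occupies 2^(log_sq_size + log_sq_stride + 4) bytes, the RQ
 * 2^(log_rq_size + log_rq_stride + 4) bytes unless the QP is attached to an
 * SRQ, uses RSS or is an XRC QP (then it has no RQ).  The byte total,
 * adjusted by the page offset, is converted to pages and rounded up to a
 * power of two.  E.g. log_sq_size = 6, log_sq_stride = 4, no RQ and 4K pages
 * gives 16KB -> 4 MTT entries.
 */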
2705static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2706{
2707        int page_shift = (qpc->log_page_size & 0x3f) + 12;
2708        int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2709        int log_sq_stride = qpc->sq_size_stride & 7;
2710        int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2711        int log_rq_stride = qpc->rq_size_stride & 7;
2712        int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2713        int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2714        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2715        int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2716        int sq_size;
2717        int rq_size;
2718        int total_pages;
2719        int total_mem;
2720        int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2721
2722        sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2723        rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2724        total_mem = sq_size + rq_size;
2725        total_pages =
2726                roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2727                                   page_shift);
2728
2729        return total_pages;
2730}
2731
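/* An MTT reservation of order n covers 2^n entries starting at its res_id;
 * check that the range [start, start + size) the caller wants to use falls
 * entirely within the reservation.
 */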
2732static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2733                           int size, struct res_mtt *mtt)
2734{
2735        int res_start = mtt->com.res_id;
2736        int res_size = (1 << mtt->order);
2737
2738        if (start < res_start || start + size > res_start + res_size)
2739                return -EPERM;
2740        return 0;
2741}
2742
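/* SW2HW_MPT on behalf of a slave: move the MPT to RES_MPT_HW, reject memory
 * windows and bind-enabled FMRs (not allowed for VFs), verify that the PD's
 * slave bits match the calling slave and that the referenced MTT range is
 * owned by it, then pass the command to firmware.  On success a reference is
 * taken on the MTT (for non-physical MRs) so it cannot be freed while the MR
 * is in HW ownership.
 */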
2743int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2744                           struct mlx4_vhcr *vhcr,
2745                           struct mlx4_cmd_mailbox *inbox,
2746                           struct mlx4_cmd_mailbox *outbox,
2747                           struct mlx4_cmd_info *cmd)
2748{
2749        int err;
2750        int index = vhcr->in_modifier;
2751        struct res_mtt *mtt;
2752        struct res_mpt *mpt = NULL;
2753        int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2754        int phys;
2755        int id;
2756        u32 pd;
2757        int pd_slave;
2758
2759        id = index & mpt_mask(dev);
2760        err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2761        if (err)
2762                return err;
2763
2764        /* Disable memory windows for VFs. */
2765        if (!mr_is_region(inbox->buf)) {
2766                err = -EPERM;
2767                goto ex_abort;
2768        }
2769
2770        /* Make sure that the PD bits related to the slave id are zeros. */
2771        pd = mr_get_pd(inbox->buf);
2772        pd_slave = (pd >> 17) & 0x7f;
2773        if (pd_slave != 0 && --pd_slave != slave) {
2774                err = -EPERM;
2775                goto ex_abort;
2776        }
2777
2778        if (mr_is_fmr(inbox->buf)) {
2779                /* FMR and Bind Enable are forbidden in slave devices. */
2780                if (mr_is_bind_enabled(inbox->buf)) {
2781                        err = -EPERM;
2782                        goto ex_abort;
2783                }
2784                /* FMR and Memory Windows are also forbidden. */
2785                if (!mr_is_region(inbox->buf)) {
2786                        err = -EPERM;
2787                        goto ex_abort;
2788                }
2789        }
2790
2791        phys = mr_phys_mpt(inbox->buf);
2792        if (!phys) {
2793                err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2794                if (err)
2795                        goto ex_abort;
2796
2797                err = check_mtt_range(dev, slave, mtt_base,
2798                                      mr_get_mtt_size(inbox->buf), mtt);
2799                if (err)
2800                        goto ex_put;
2801
2802                mpt->mtt = mtt;
2803        }
2804
2805        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2806        if (err)
2807                goto ex_put;
2808
2809        if (!phys) {
2810                atomic_inc(&mtt->ref_count);
2811                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2812        }
2813
2814        res_end_move(dev, slave, RES_MPT, id);
2815        return 0;
2816
2817ex_put:
2818        if (!phys)
2819                put_res(dev, slave, mtt->com.res_id, RES_MTT);
2820ex_abort:
2821        res_abort_move(dev, slave, RES_MPT, id);
2822
2823        return err;
2824}
2825
2826int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2827                           struct mlx4_vhcr *vhcr,
2828                           struct mlx4_cmd_mailbox *inbox,
2829                           struct mlx4_cmd_mailbox *outbox,
2830                           struct mlx4_cmd_info *cmd)
2831{
2832        int err;
2833        int index = vhcr->in_modifier;
2834        struct res_mpt *mpt;
2835        int id;
2836
2837        id = index & mpt_mask(dev);
2838        err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2839        if (err)
2840                return err;
2841
2842        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2843        if (err)
2844                goto ex_abort;
2845
2846        if (mpt->mtt)
2847                atomic_dec(&mpt->mtt->ref_count);
2848
2849        res_end_move(dev, slave, RES_MPT, id);
2850        return 0;
2851
2852ex_abort:
2853        res_abort_move(dev, slave, RES_MPT, id);
2854
2855        return err;
2856}
2857
2858int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2859                           struct mlx4_vhcr *vhcr,
2860                           struct mlx4_cmd_mailbox *inbox,
2861                           struct mlx4_cmd_mailbox *outbox,
2862                           struct mlx4_cmd_info *cmd)
2863{
2864        int err;
2865        int index = vhcr->in_modifier;
2866        struct res_mpt *mpt;
2867        int id;
2868
2869        id = index & mpt_mask(dev);
2870        err = get_res(dev, slave, id, RES_MPT, &mpt);
2871        if (err)
2872                return err;
2873
2874        if (mpt->com.from_state == RES_MPT_MAPPED) {
2875                /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2876                 * that, the VF must read the MPT. But since the MPT entry memory is not
2877                 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2878                 * entry contents. To guarantee that the MPT cannot be changed, the driver
2879                 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2880                 * ownership fofollowing the change. The change here allows the VF to
2881                 * ownership following the change. The change here allows the VF to
2882                 */
2883                struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2884                                        &mlx4_priv(dev)->mr_table.dmpt_table,
2885                                        mpt->key, NULL);
2886
2887                if (!mpt_entry || !outbox->buf) {
2888                        err = -EINVAL;
2889                        goto out;
2890                }
2891
2892                memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2893
2894                err = 0;
2895        } else if (mpt->com.from_state == RES_MPT_HW) {
2896                err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2897        } else {
2898                err = -EBUSY;
2899                goto out;
2900        }
2901
2903out:
2904        put_res(dev, slave, id, RES_MPT);
2905        return err;
2906}
2907
2908static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2909{
2910        return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2911}
2912
2913static int qp_get_scqn(struct mlx4_qp_context *qpc)
2914{
2915        return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2916}
2917
2918static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2919{
2920        return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2921}
2922
2923static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2924                                  struct mlx4_qp_context *context)
2925{
2926        u32 qpn = vhcr->in_modifier & 0xffffff;
2927        u32 qkey = 0;
2928
2929        if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2930                return;
2931
2932        /* adjust qkey in qp context */
2933        context->qkey = cpu_to_be32(qkey);
2934}
2935
2936static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2937                                 struct mlx4_qp_context *qpc,
2938                                 struct mlx4_cmd_mailbox *inbox);
2939
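/* RST2INIT on behalf of a slave: before letting firmware take the QP to
 * INIT, look up and validate every resource the QP context references (MTT
 * range, receive and send CQs, optional SRQ) and take a reference on each,
 * so none of them can be destroyed while the QP is in HW ownership.
 */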
2940int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2941                             struct mlx4_vhcr *vhcr,
2942                             struct mlx4_cmd_mailbox *inbox,
2943                             struct mlx4_cmd_mailbox *outbox,
2944                             struct mlx4_cmd_info *cmd)
2945{
2946        int err;
2947        int qpn = vhcr->in_modifier & 0x7fffff;
2948        struct res_mtt *mtt;
2949        struct res_qp *qp;
2950        struct mlx4_qp_context *qpc = inbox->buf + 8;
2951        int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2952        int mtt_size = qp_get_mtt_size(qpc);
2953        struct res_cq *rcq;
2954        struct res_cq *scq;
2955        int rcqn = qp_get_rcqn(qpc);
2956        int scqn = qp_get_scqn(qpc);
2957        u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2958        int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2959        struct res_srq *srq;
2960        int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2961
2962        err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2963        if (err)
2964                return err;
2965
2966        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2967        if (err)
2968                return err;
2969        qp->local_qpn = local_qpn;
2970        qp->sched_queue = 0;
2971        qp->param3 = 0;
2972        qp->vlan_control = 0;
2973        qp->fvl_rx = 0;
2974        qp->pri_path_fl = 0;
2975        qp->vlan_index = 0;
2976        qp->feup = 0;
2977        qp->qpc_flags = be32_to_cpu(qpc->flags);
2978
2979        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2980        if (err)
2981                goto ex_abort;
2982
2983        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2984        if (err)
2985                goto ex_put_mtt;
2986
2987        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2988        if (err)
2989                goto ex_put_mtt;
2990
2991        if (scqn != rcqn) {
2992                err = get_res(dev, slave, scqn, RES_CQ, &scq);
2993                if (err)
2994                        goto ex_put_rcq;
2995        } else
2996                scq = rcq;
2997
2998        if (use_srq) {
2999                err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3000                if (err)
3001                        goto ex_put_scq;
3002        }
3003
3004        adjust_proxy_tun_qkey(dev, vhcr, qpc);
3005        update_pkey_index(dev, slave, inbox);
3006        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3007        if (err)
3008                goto ex_put_srq;
3009        atomic_inc(&mtt->ref_count);
3010        qp->mtt = mtt;
3011        atomic_inc(&rcq->ref_count);
3012        qp->rcq = rcq;
3013        atomic_inc(&scq->ref_count);
3014        qp->scq = scq;
3015
3016        if (scqn != rcqn)
3017                put_res(dev, slave, scqn, RES_CQ);
3018
3019        if (use_srq) {
3020                atomic_inc(&srq->ref_count);
3021                put_res(dev, slave, srqn, RES_SRQ);
3022                qp->srq = srq;
3023        }
3024
3025        /* Save param3 for dynamic changes from VST back to VGT */
3026        qp->param3 = qpc->param3;
3027        put_res(dev, slave, rcqn, RES_CQ);
3028        put_res(dev, slave, mtt_base, RES_MTT);
3029        res_end_move(dev, slave, RES_QP, qpn);
3030
3031        return 0;
3032
3033ex_put_srq:
3034        if (use_srq)
3035                put_res(dev, slave, srqn, RES_SRQ);
3036ex_put_scq:
3037        if (scqn != rcqn)
3038                put_res(dev, slave, scqn, RES_CQ);
3039ex_put_rcq:
3040        put_res(dev, slave, rcqn, RES_CQ);
3041ex_put_mtt:
3042        put_res(dev, slave, mtt_base, RES_MTT);
3043ex_abort:
3044        res_abort_move(dev, slave, RES_QP, qpn);
3045
3046        return err;
3047}
3048
3049static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3050{
3051        return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3052}
3053
3054static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3055{
3056        int log_eq_size = eqc->log_eq_size & 0x1f;
3057        int page_shift = (eqc->log_page_size & 0x3f) + 12;
3058
3059        if (log_eq_size + 5 < page_shift)
3060                return 1;
3061
3062        return 1 << (log_eq_size + 5 - page_shift);
3063}
3064
3065static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3066{
3067        return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3068}
3069
3070static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3071{
3072        int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3073        int page_shift = (cqc->log_page_size & 0x3f) + 12;
3074
3075        if (log_cq_size + 5 < page_shift)
3076                return 1;
3077
3078        return 1 << (log_cq_size + 5 - page_shift);
3079}
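
/*
 * Illustrative sizing example (assuming the 32-byte EQE/CQE size implied
 * by the "+ 5" above): a CQ with log_cq_size = 10 holds 1024 entries,
 * i.e. 1 << (10 + 5) = 32KB of buffer.  With log_page_size = 0 the page
 * shift is 12 (4KB pages), so the buffer spans 1 << (10 + 5 - 12) = 8 MTT
 * entries; anything smaller than one page is rounded up to a single entry.
 */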
3080
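/*
 * The EQ wrappers below track slave EQs under a composite resource id,
 * (slave << 10) | eqn, so EQ numbers of different slaves do not collide
 * in the resource tracker (this assumes per-slave EQ numbers fit in 10
 * bits).
 */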
3081int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3082                          struct mlx4_vhcr *vhcr,
3083                          struct mlx4_cmd_mailbox *inbox,
3084                          struct mlx4_cmd_mailbox *outbox,
3085                          struct mlx4_cmd_info *cmd)
3086{
3087        int err;
3088        int eqn = vhcr->in_modifier;
3089        int res_id = (slave << 10) | eqn;
3090        struct mlx4_eq_context *eqc = inbox->buf;
3091        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3092        int mtt_size = eq_get_mtt_size(eqc);
3093        struct res_eq *eq;
3094        struct res_mtt *mtt;
3095
3096        err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3097        if (err)
3098                return err;
3099        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3100        if (err)
3101                goto out_add;
3102
3103        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3104        if (err)
3105                goto out_move;
3106
3107        err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3108        if (err)
3109                goto out_put;
3110
3111        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3112        if (err)
3113                goto out_put;
3114
3115        atomic_inc(&mtt->ref_count);
3116        eq->mtt = mtt;
3117        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3118        res_end_move(dev, slave, RES_EQ, res_id);
3119        return 0;
3120
3121out_put:
3122        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3123out_move:
3124        res_abort_move(dev, slave, RES_EQ, res_id);
3125out_add:
3126        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3127        return err;
3128}
3129
3130int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3131                            struct mlx4_vhcr *vhcr,
3132                            struct mlx4_cmd_mailbox *inbox,
3133                            struct mlx4_cmd_mailbox *outbox,
3134                            struct mlx4_cmd_info *cmd)
3135{
3136        int err;
3137        u8 get = vhcr->op_modifier;
3138
3139        if (get != 1)
3140                return -EPERM;
3141
3142        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3143
3144        return err;
3145}
3146
3147static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3148                              int len, struct res_mtt **res)
3149{
3150        struct mlx4_priv *priv = mlx4_priv(dev);
3151        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3152        struct res_mtt *mtt;
3153        int err = -EINVAL;
3154
3155        spin_lock_irq(mlx4_tlock(dev));
3156        list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3157                            com.list) {
3158                if (!check_mtt_range(dev, slave, start, len, mtt)) {
3159                        *res = mtt;
3160                        mtt->com.from_state = mtt->com.state;
3161                        mtt->com.state = RES_MTT_BUSY;
3162                        err = 0;
3163                        break;
3164                }
3165        }
3166        spin_unlock_irq(mlx4_tlock(dev));
3167
3168        return err;
3169}
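
/*
 * On success the matching MTT range is left in RES_MTT_BUSY (its previous
 * state is saved in com.from_state), so the caller is expected to release
 * it with put_res() when done; see mlx4_WRITE_MTT_wrapper() below.
 */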
3170
3171static int verify_qp_parameters(struct mlx4_dev *dev,
3172                                struct mlx4_vhcr *vhcr,
3173                                struct mlx4_cmd_mailbox *inbox,
3174                                enum qp_transition transition, u8 slave)
3175{
3176        u32                     qp_type;
3177        u32                     qpn;
3178        struct mlx4_qp_context  *qp_ctx;
3179        enum mlx4_qp_optpar     optpar;
3180        int port;
3181        int num_gids;
3182
3183        qp_ctx  = inbox->buf + 8;
3184        qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3185        optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
3186
3187        if (slave != mlx4_master_func_num(dev)) {
3188                qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3189                /* setting QP rate-limit is disallowed for VFs */
3190                if (qp_ctx->rate_limit_params)
3191                        return -EPERM;
3192        }
3193
3194        switch (qp_type) {
3195        case MLX4_QP_ST_RC:
3196        case MLX4_QP_ST_XRC:
3197        case MLX4_QP_ST_UC:
3198                switch (transition) {
3199                case QP_TRANS_INIT2RTR:
3200                case QP_TRANS_RTR2RTS:
3201                case QP_TRANS_RTS2RTS:
3202                case QP_TRANS_SQD2SQD:
3203                case QP_TRANS_SQD2RTS:
3204                        if (slave != mlx4_master_func_num(dev)) {
3205                                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3206                                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3207                                        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3208                                                num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3209                                        else
3210                                                num_gids = 1;
3211                                        if (qp_ctx->pri_path.mgid_index >= num_gids)
3212                                                return -EINVAL;
3213                                }
3214                                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3215                                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3216                                        if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3217                                                num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3218                                        else
3219                                                num_gids = 1;
3220                                        if (qp_ctx->alt_path.mgid_index >= num_gids)
3221                                                return -EINVAL;
3222                                }
3223                        }
3224                        break;
3225                default:
3226                        break;
3227                }
3228                break;
3229
3230        case MLX4_QP_ST_MLX:
3231                qpn = vhcr->in_modifier & 0x7fffff;
3232                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3233                if (transition == QP_TRANS_INIT2RTR &&
3234                    slave != mlx4_master_func_num(dev) &&
3235                    mlx4_is_qp_reserved(dev, qpn) &&
3236                    !mlx4_vf_smi_enabled(dev, slave, port)) {
3237                        /* only enabled VFs may create MLX proxy QPs */
3238                        mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3239                                 __func__, slave, port);
3240                        return -EPERM;
3241                }
3242                break;
3243
3244        default:
3245                break;
3246        }
3247
3248        return 0;
3249}
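
/*
 * Summary of the checks above: for VFs the FPP bit is cleared and setting
 * a QP rate limit is rejected; on RC/UC/XRC path-modifying transitions
 * the mgid_index is bounded by the slave's GID count (Ethernet ports) or
 * by 1 (IB ports); and only VFs with SMI enabled may bring an MLX proxy
 * QP to RTR.
 */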
3250
3251int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3252                           struct mlx4_vhcr *vhcr,
3253                           struct mlx4_cmd_mailbox *inbox,
3254                           struct mlx4_cmd_mailbox *outbox,
3255                           struct mlx4_cmd_info *cmd)
3256{
3257        struct mlx4_mtt mtt;
3258        __be64 *page_list = inbox->buf;
3259        u64 *pg_list = (u64 *)page_list;
3260        int i;
3261        struct res_mtt *rmtt = NULL;
3262        int start = be64_to_cpu(page_list[0]);
3263        int npages = vhcr->in_modifier;
3264        int err;
3265
3266        err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3267        if (err)
3268                return err;
3269
3270        /* Call the SW implementation of write_mtt:
3271         * - Prepare a dummy mtt struct
3272         * - Translate inbox contents to simple addresses in host endianness */
3273        mtt.offset = 0;  /* TBD: offset handling is missing here, but this
3274                            path never actually uses the offset */
3275        mtt.order = 0;
3276        mtt.page_shift = 0;
3277        for (i = 0; i < npages; ++i)
3278                pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3279
3280        err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3281                               ((u64 *)page_list + 2));
3282
3283        if (rmtt)
3284                put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3285
3286        return err;
3287}
3288
3289int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3290                          struct mlx4_vhcr *vhcr,
3291                          struct mlx4_cmd_mailbox *inbox,
3292                          struct mlx4_cmd_mailbox *outbox,
3293                          struct mlx4_cmd_info *cmd)
3294{
3295        int eqn = vhcr->in_modifier;
3296        int res_id = eqn | (slave << 10);
3297        struct res_eq *eq;
3298        int err;
3299
3300        err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3301        if (err)
3302                return err;
3303
3304        err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3305        if (err)
3306                goto ex_abort;
3307
3308        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3309        if (err)
3310                goto ex_put;
3311
3312        atomic_dec(&eq->mtt->ref_count);
3313        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3314        res_end_move(dev, slave, RES_EQ, res_id);
3315        rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3316
3317        return 0;
3318
3319ex_put:
3320        put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3321ex_abort:
3322        res_abort_move(dev, slave, RES_EQ, res_id);
3323
3324        return err;
3325}
3326
3327int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3328{
3329        struct mlx4_priv *priv = mlx4_priv(dev);
3330        struct mlx4_slave_event_eq_info *event_eq;
3331        struct mlx4_cmd_mailbox *mailbox;
3332        u32 in_modifier = 0;
3333        int err;
3334        int res_id;
3335        struct res_eq *req;
3336
3337        if (!priv->mfunc.master.slave_state)
3338                return -EINVAL;
3339
3340        /* check that the slave is valid, is not the PF, and is active */
3341        if (slave < 0 || slave > dev->persist->num_vfs ||
3342            slave == dev->caps.function ||
3343            !priv->mfunc.master.slave_state[slave].active)
3344                return 0;
3345
3346        event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3347
3348        /* Create the event only if the slave is registered */
3349        if (event_eq->eqn < 0)
3350                return 0;
3351
3352        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3353        res_id = (slave << 10) | event_eq->eqn;
3354        err = get_res(dev, slave, res_id, RES_EQ, &req);
3355        if (err)
3356                goto unlock;
3357
3358        if (req->com.from_state != RES_EQ_HW) {
3359                err = -EINVAL;
3360                goto put;
3361        }
3362
3363        mailbox = mlx4_alloc_cmd_mailbox(dev);
3364        if (IS_ERR(mailbox)) {
3365                err = PTR_ERR(mailbox);
3366                goto put;
3367        }
3368
3369        if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3370                ++event_eq->token;
3371                eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3372        }
3373
3374        memcpy(mailbox->buf, (u8 *) eqe, 28);
3375
3376        in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3377
3378        err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3379                       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3380                       MLX4_CMD_NATIVE);
3381
3382        put_res(dev, slave, res_id, RES_EQ);
3383        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3384        mlx4_free_cmd_mailbox(dev, mailbox);
3385        return err;
3386
3387put:
3388        put_res(dev, slave, res_id, RES_EQ);
3389
3390unlock:
3391        mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3392        return err;
3393}
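
/*
 * Notes on mlx4_GEN_EQE() above: the event is injected with the GEN_EQE
 * firmware command, with the target slave in bits 0-7 of the in_modifier
 * and the slave's EQN in bits 16-25.  Only the first 28 bytes of the EQE
 * are copied to the mailbox; the trailing bytes (including the ownership
 * byte) are presumably filled in by the EQ machinery itself.
 */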
3394
3395int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3396                          struct mlx4_vhcr *vhcr,
3397                          struct mlx4_cmd_mailbox *inbox,
3398                          struct mlx4_cmd_mailbox *outbox,
3399                          struct mlx4_cmd_info *cmd)
3400{
3401        int eqn = vhcr->in_modifier;
3402        int res_id = eqn | (slave << 10);
3403        struct res_eq *eq;
3404        int err;
3405
3406        err = get_res(dev, slave, res_id, RES_EQ, &eq);
3407        if (err)
3408                return err;
3409
3410        if (eq->com.from_state != RES_EQ_HW) {
3411                err = -EINVAL;
3412                goto ex_put;
3413        }
3414
3415        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3416
3417ex_put:
3418        put_res(dev, slave, res_id, RES_EQ);
3419        return err;
3420}
3421
3422int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3423                          struct mlx4_vhcr *vhcr,
3424                          struct mlx4_cmd_mailbox *inbox,
3425                          struct mlx4_cmd_mailbox *outbox,
3426                          struct mlx4_cmd_info *cmd)
3427{
3428        int err;
3429        int cqn = vhcr->in_modifier;
3430        struct mlx4_cq_context *cqc = inbox->buf;
3431        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3432        struct res_cq *cq = NULL;
3433        struct res_mtt *mtt;
3434
3435        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3436        if (err)
3437                return err;
3438        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3439        if (err)
3440                goto out_move;
3441        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3442        if (err)
3443                goto out_put;
3444        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3445        if (err)
3446                goto out_put;
3447        atomic_inc(&mtt->ref_count);
3448        cq->mtt = mtt;
3449        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3450        res_end_move(dev, slave, RES_CQ, cqn);
3451        return 0;
3452
3453out_put:
3454        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3455out_move:
3456        res_abort_move(dev, slave, RES_CQ, cqn);
3457        return err;
3458}
3459
3460int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3461                          struct mlx4_vhcr *vhcr,
3462                          struct mlx4_cmd_mailbox *inbox,
3463                          struct mlx4_cmd_mailbox *outbox,
3464                          struct mlx4_cmd_info *cmd)
3465{
3466        int err;
3467        int cqn = vhcr->in_modifier;
3468        struct res_cq *cq = NULL;
3469
3470        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3471        if (err)
3472                return err;
3473        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3474        if (err)
3475                goto out_move;
3476        atomic_dec(&cq->mtt->ref_count);
3477        res_end_move(dev, slave, RES_CQ, cqn);
3478        return 0;
3479
3480out_move:
3481        res_abort_move(dev, slave, RES_CQ, cqn);
3482        return err;
3483}
3484
3485int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3486                          struct mlx4_vhcr *vhcr,
3487                          struct mlx4_cmd_mailbox *inbox,
3488                          struct mlx4_cmd_mailbox *outbox,
3489                          struct mlx4_cmd_info *cmd)
3490{
3491        int cqn = vhcr->in_modifier;
3492        struct res_cq *cq;
3493        int err;
3494
3495        err = get_res(dev, slave, cqn, RES_CQ, &cq);
3496        if (err)
3497                return err;
3498
3499        if (cq->com.from_state != RES_CQ_HW)
3500                goto ex_put;
3501
3502        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3503ex_put:
3504        put_res(dev, slave, cqn, RES_CQ);
3505
3506        return err;
3507}
3508
3509static int handle_resize(struct mlx4_dev *dev, int slave,
3510                         struct mlx4_vhcr *vhcr,
3511                         struct mlx4_cmd_mailbox *inbox,
3512                         struct mlx4_cmd_mailbox *outbox,
3513                         struct mlx4_cmd_info *cmd,
3514                         struct res_cq *cq)
3515{
3516        int err;
3517        struct res_mtt *orig_mtt;
3518        struct res_mtt *mtt;
3519        struct mlx4_cq_context *cqc = inbox->buf;
3520        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3521
3522        err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3523        if (err)
3524                return err;
3525
3526        if (orig_mtt != cq->mtt) {
3527                err = -EINVAL;
3528                goto ex_put;
3529        }
3530
3531        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3532        if (err)
3533                goto ex_put;
3534
3535        err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3536        if (err)
3537                goto ex_put1;
3538        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3539        if (err)
3540                goto ex_put1;
3541        atomic_dec(&orig_mtt->ref_count);
3542        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3543        atomic_inc(&mtt->ref_count);
3544        cq->mtt = mtt;
3545        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3546        return 0;
3547
3548ex_put1:
3549        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3550ex_put:
3551        put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3552
3553        return err;
3555}
3556
3557int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3558                           struct mlx4_vhcr *vhcr,
3559                           struct mlx4_cmd_mailbox *inbox,
3560                           struct mlx4_cmd_mailbox *outbox,
3561                           struct mlx4_cmd_info *cmd)
3562{
3563        int cqn = vhcr->in_modifier;
3564        struct res_cq *cq;
3565        int err;
3566
3567        err = get_res(dev, slave, cqn, RES_CQ, &cq);
3568        if (err)
3569                return err;
3570
3571        if (cq->com.from_state != RES_CQ_HW)
3572                goto ex_put;
3573
3574        if (vhcr->op_modifier == 0) {
3575                err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3576                goto ex_put;
3577        }
3578
3579        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3580ex_put:
3581        put_res(dev, slave, cqn, RES_CQ);
3582
3583        return err;
3584}
3585
3586static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3587{
3588        int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3589        int log_rq_stride = srqc->logstride & 7;
3590        int page_shift = (srqc->log_page_size & 0x3f) + 12;
3591
3592        if (log_srq_size + log_rq_stride + 4 < page_shift)
3593                return 1;
3594
3595        return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3596}
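
/*
 * Illustrative example (assuming the 16-byte WQE stride unit implied by
 * the "+ 4" above): log_srq_size = 8 and log_rq_stride = 2 describe 256
 * WQEs of 64 bytes each, i.e. 16KB of buffer; with 4KB pages (page shift
 * 12) that is 1 << (8 + 2 + 4 - 12) = 4 MTT entries.
 */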
3597
3598int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3599                           struct mlx4_vhcr *vhcr,
3600                           struct mlx4_cmd_mailbox *inbox,
3601                           struct mlx4_cmd_mailbox *outbox,
3602                           struct mlx4_cmd_info *cmd)
3603{
3604        int err;
3605        int srqn = vhcr->in_modifier;
3606        struct res_mtt *mtt;
3607        struct res_srq *srq = NULL;
3608        struct mlx4_srq_context *srqc = inbox->buf;
3609        int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3610
3611        if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3612                return -EINVAL;
3613
3614        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3615        if (err)
3616                return err;
3617        err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3618        if (err)
3619                goto ex_abort;
3620        err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3621                              mtt);
3622        if (err)
3623                goto ex_put_mtt;
3624
3625        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3626        if (err)
3627                goto ex_put_mtt;
3628
3629        atomic_inc(&mtt->ref_count);
3630        srq->mtt = mtt;
3631        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3632        res_end_move(dev, slave, RES_SRQ, srqn);
3633        return 0;
3634
3635ex_put_mtt:
3636        put_res(dev, slave, mtt->com.res_id, RES_MTT);
3637ex_abort:
3638        res_abort_move(dev, slave, RES_SRQ, srqn);
3639
3640        return err;
3641}
3642
3643int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3644                           struct mlx4_vhcr *vhcr,
3645                           struct mlx4_cmd_mailbox *inbox,
3646                           struct mlx4_cmd_mailbox *outbox,
3647                           struct mlx4_cmd_info *cmd)
3648{
3649        int err;
3650        int srqn = vhcr->in_modifier;
3651        struct res_srq *srq = NULL;
3652
3653        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3654        if (err)
3655                return err;
3656        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3657        if (err)
3658                goto ex_abort;
3659        atomic_dec(&srq->mtt->ref_count);
3660        if (srq->cq)
3661                atomic_dec(&srq->cq->ref_count);
3662        res_end_move(dev, slave, RES_SRQ, srqn);
3663
3664        return 0;
3665
3666ex_abort:
3667        res_abort_move(dev, slave, RES_SRQ, srqn);
3668
3669        return err;
3670}
3671
3672int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3673                           struct mlx4_vhcr *vhcr,
3674                           struct mlx4_cmd_mailbox *inbox,
3675                           struct mlx4_cmd_mailbox *outbox,
3676                           struct mlx4_cmd_info *cmd)
3677{
3678        int err;
3679        int srqn = vhcr->in_modifier;
3680        struct res_srq *srq;
3681
3682        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3683        if (err)
3684                return err;
3685        if (srq->com.from_state != RES_SRQ_HW) {
3686                err = -EBUSY;
3687                goto out;
3688        }
3689        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3690out:
3691        put_res(dev, slave, srqn, RES_SRQ);
3692        return err;
3693}
3694
3695int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3696                         struct mlx4_vhcr *vhcr,
3697                         struct mlx4_cmd_mailbox *inbox,
3698                         struct mlx4_cmd_mailbox *outbox,
3699                         struct mlx4_cmd_info *cmd)
3700{
3701        int err;
3702        int srqn = vhcr->in_modifier;
3703        struct res_srq *srq;
3704
3705        err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3706        if (err)
3707                return err;
3708
3709        if (srq->com.from_state != RES_SRQ_HW) {
3710                err = -EBUSY;
3711                goto out;
3712        }
3713
3714        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3715out:
3716        put_res(dev, slave, srqn, RES_SRQ);
3717        return err;
3718}
3719
3720int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3721                        struct mlx4_vhcr *vhcr,
3722                        struct mlx4_cmd_mailbox *inbox,
3723                        struct mlx4_cmd_mailbox *outbox,
3724                        struct mlx4_cmd_info *cmd)
3725{
3726        int err;
3727        int qpn = vhcr->in_modifier & 0x7fffff;
3728        struct res_qp *qp;
3729
3730        err = get_res(dev, slave, qpn, RES_QP, &qp);
3731        if (err)
3732                return err;
3733        if (qp->com.from_state != RES_QP_HW) {
3734                err = -EBUSY;
3735                goto out;
3736        }
3737
3738        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3739out:
3740        put_res(dev, slave, qpn, RES_QP);
3741        return err;
3742}
3743
3744int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3745                              struct mlx4_vhcr *vhcr,
3746                              struct mlx4_cmd_mailbox *inbox,
3747                              struct mlx4_cmd_mailbox *outbox,
3748                              struct mlx4_cmd_info *cmd)
3749{
3750        struct mlx4_qp_context *context = inbox->buf + 8;
3751        adjust_proxy_tun_qkey(dev, vhcr, context);
3752        update_pkey_index(dev, slave, inbox);
3753        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3754}
3755
3756static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3757                                  struct mlx4_qp_context *qpc,
3758                                  struct mlx4_cmd_mailbox *inbox)
3759{
3760        enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3761        u8 pri_sched_queue;
3762        int port = mlx4_slave_convert_port(
3763                   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3764
3765        if (port < 0)
3766                return -EINVAL;
3767
3768        pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3769                          ((port & 1) << 6);
3770
3771        if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3772            qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3773                qpc->pri_path.sched_queue = pri_sched_queue;
3774        }
3775
3776        if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3777                port = mlx4_slave_convert_port(
3778                                dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3779                                + 1) - 1;
3780                if (port < 0)
3781                        return -EINVAL;
3782                qpc->alt_path.sched_queue =
3783                        (qpc->alt_path.sched_queue & ~(1 << 6)) |
3784                        (port & 1) << 6;
3785        }
3786        return 0;
3787}
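
/*
 * Bit 6 of pri_path.sched_queue (and of alt_path.sched_queue) selects the
 * physical port.  The helper above rewrites that bit with the result of
 * mlx4_slave_convert_port(), so a slave whose logical port numbering
 * differs from the physical one still lands on the right port.
 */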
3788
3789static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3790                                struct mlx4_qp_context *qpc,
3791                                struct mlx4_cmd_mailbox *inbox)
3792{
3793        u64 mac;
3794        int port;
3795        u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3796        u8 sched = *(u8 *)(inbox->buf + 64);
3797        u8 smac_ix;
3798
3799        port = (sched >> 6 & 1) + 1;
3800        if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3801                smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3802                if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3803                        return -ENOENT;
3804        }
3805        return 0;
3806}
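
/*
 * For Ethernet (RoCE) QPs the low 7 bits of pri_path.grh_mylmc carry the
 * source-MAC index, and the sched_queue byte read at offset 64 of the
 * mailbox encodes the port; the check above rejects the transition unless
 * that index resolves to a MAC registered to this slave on that port.
 */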
3807
3808int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3809                             struct mlx4_vhcr *vhcr,
3810                             struct mlx4_cmd_mailbox *inbox,
3811                             struct mlx4_cmd_mailbox *outbox,
3812                             struct mlx4_cmd_info *cmd)
3813{
3814        int err;
3815        struct mlx4_qp_context *qpc = inbox->buf + 8;
3816        int qpn = vhcr->in_modifier & 0x7fffff;
3817        struct res_qp *qp;
3818        u8 orig_sched_queue;
3819        u8 orig_vlan_control = qpc->pri_path.vlan_control;
3820        u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3821        u8 orig_pri_path_fl = qpc->pri_path.fl;
3822        u8 orig_vlan_index = qpc->pri_path.vlan_index;
3823        u8 orig_feup = qpc->pri_path.feup;
3824
3825        err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3826        if (err)
3827                return err;
3828        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3829        if (err)
3830                return err;
3831
3832        if (roce_verify_mac(dev, slave, qpc, inbox))
3833                return -EINVAL;
3834
3835        update_pkey_index(dev, slave, inbox);
3836        update_gid(dev, inbox, (u8)slave);
3837        adjust_proxy_tun_qkey(dev, vhcr, qpc);
3838        orig_sched_queue = qpc->pri_path.sched_queue;
3839
3840        err = get_res(dev, slave, qpn, RES_QP, &qp);
3841        if (err)
3842                return err;
3843        if (qp->com.from_state != RES_QP_HW) {
3844                err = -EBUSY;
3845                goto out;
3846        }
3847
3848        err = update_vport_qp_param(dev, inbox, slave, qpn);
3849        if (err)
3850                goto out;
3851
3852        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3853out:
3854        /* if no error, save sched queue value passed in by VF. This is
3855         * essentially the QOS value provided by the VF. This will be useful
3856         * if we allow dynamic changes from VST back to VGT
3857         */
3858        if (!err) {
3859                qp->sched_queue = orig_sched_queue;
3860                qp->vlan_control = orig_vlan_control;
3861                qp->fvl_rx      =  orig_fvl_rx;
3862                qp->pri_path_fl = orig_pri_path_fl;
3863                qp->vlan_index  = orig_vlan_index;
3864                qp->feup        = orig_feup;
3865        }
3866        put_res(dev, slave, qpn, RES_QP);
3867        return err;
3868}
3869
3870int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3871                            struct mlx4_vhcr *vhcr,
3872                            struct mlx4_cmd_mailbox *inbox,
3873                            struct mlx4_cmd_mailbox *outbox,
3874                            struct mlx4_cmd_info *cmd)
3875{
3876        int err;
3877        struct mlx4_qp_context *context = inbox->buf + 8;
3878
3879        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3880        if (err)
3881                return err;
3882        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3883        if (err)
3884                return err;
3885
3886        update_pkey_index(dev, slave, inbox);
3887        update_gid(dev, inbox, (u8)slave);
3888        adjust_proxy_tun_qkey(dev, vhcr, context);
3889        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3890}
3891
3892int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3893                            struct mlx4_vhcr *vhcr,
3894                            struct mlx4_cmd_mailbox *inbox,
3895                            struct mlx4_cmd_mailbox *outbox,
3896                            struct mlx4_cmd_info *cmd)
3897{
3898        int err;
3899        struct mlx4_qp_context *context = inbox->buf + 8;
3900
3901        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3902        if (err)
3903                return err;
3904        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3905        if (err)
3906                return err;
3907
3908        update_pkey_index(dev, slave, inbox);
3909        update_gid(dev, inbox, (u8)slave);
3910        adjust_proxy_tun_qkey(dev, vhcr, context);
3911        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3912}
3913
3915int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3916                              struct mlx4_vhcr *vhcr,
3917                              struct mlx4_cmd_mailbox *inbox,
3918                              struct mlx4_cmd_mailbox *outbox,
3919                              struct mlx4_cmd_info *cmd)
3920{
3921        struct mlx4_qp_context *context = inbox->buf + 8;
3922        int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3923        if (err)
3924                return err;
3925        adjust_proxy_tun_qkey(dev, vhcr, context);
3926        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3927}
3928
3929int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3930                            struct mlx4_vhcr *vhcr,
3931                            struct mlx4_cmd_mailbox *inbox,
3932                            struct mlx4_cmd_mailbox *outbox,
3933                            struct mlx4_cmd_info *cmd)
3934{
3935        int err;
3936        struct mlx4_qp_context *context = inbox->buf + 8;
3937
3938        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3939        if (err)
3940                return err;
3941        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3942        if (err)
3943                return err;
3944
3945        adjust_proxy_tun_qkey(dev, vhcr, context);
3946        update_gid(dev, inbox, (u8)slave);
3947        update_pkey_index(dev, slave, inbox);
3948        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3949}
3950
3951int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3952                            struct mlx4_vhcr *vhcr,
3953                            struct mlx4_cmd_mailbox *inbox,
3954                            struct mlx4_cmd_mailbox *outbox,
3955                            struct mlx4_cmd_info *cmd)
3956{
3957        int err;
3958        struct mlx4_qp_context *context = inbox->buf + 8;
3959
3960        err = adjust_qp_sched_queue(dev, slave, context, inbox);
3961        if (err)
3962                return err;
3963        err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3964        if (err)
3965                return err;
3966
3967        adjust_proxy_tun_qkey(dev, vhcr, context);
3968        update_gid(dev, inbox, (u8)slave);
3969        update_pkey_index(dev, slave, inbox);
3970        return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3971}
3972
3973int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3974                         struct mlx4_vhcr *vhcr,
3975                         struct mlx4_cmd_mailbox *inbox,
3976                         struct mlx4_cmd_mailbox *outbox,
3977                         struct mlx4_cmd_info *cmd)
3978{
3979        int err;
3980        int qpn = vhcr->in_modifier & 0x7fffff;
3981        struct res_qp *qp;
3982
3983        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3984        if (err)
3985                return err;
3986        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3987        if (err)
3988                goto ex_abort;
3989
3990        atomic_dec(&qp->mtt->ref_count);
3991        atomic_dec(&qp->rcq->ref_count);
3992        atomic_dec(&qp->scq->ref_count);
3993        if (qp->srq)
3994                atomic_dec(&qp->srq->ref_count);
3995        res_end_move(dev, slave, RES_QP, qpn);
3996        return 0;
3997
3998ex_abort:
3999        res_abort_move(dev, slave, RES_QP, qpn);
4000
4001        return err;
4002}
4003
4004static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
4005                                struct res_qp *rqp, u8 *gid)
4006{
4007        struct res_gid *res;
4008
4009        list_for_each_entry(res, &rqp->mcg_list, list) {
4010                if (!memcmp(res->gid, gid, 16))
4011                        return res;
4012        }
4013        return NULL;
4014}
4015
4016static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4017                       u8 *gid, enum mlx4_protocol prot,
4018                       enum mlx4_steer_type steer, u64 reg_id)
4019{
4020        struct res_gid *res;
4021        int err;
4022
4023        res = kzalloc(sizeof(*res), GFP_KERNEL);
4024        if (!res)
4025                return -ENOMEM;
4026
4027        spin_lock_irq(&rqp->mcg_spl);
4028        if (find_gid(dev, slave, rqp, gid)) {
4029                kfree(res);
4030                err = -EEXIST;
4031        } else {
4032                memcpy(res->gid, gid, 16);
4033                res->prot = prot;
4034                res->steer = steer;
4035                res->reg_id = reg_id;
4036                list_add_tail(&res->list, &rqp->mcg_list);
4037                err = 0;
4038        }
4039        spin_unlock_irq(&rqp->mcg_spl);
4040
4041        return err;
4042}
4043
4044static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4045                       u8 *gid, enum mlx4_protocol prot,
4046                       enum mlx4_steer_type steer, u64 *reg_id)
4047{
4048        struct res_gid *res;
4049        int err;
4050
4051        spin_lock_irq(&rqp->mcg_spl);
4052        res = find_gid(dev, slave, rqp, gid);
4053        if (!res || res->prot != prot || res->steer != steer)
4054                err = -EINVAL;
4055        else {
4056                *reg_id = res->reg_id;
4057                list_del(&res->list);
4058                kfree(res);
4059                err = 0;
4060        }
4061        spin_unlock_irq(&rqp->mcg_spl);
4062
4063        return err;
4064}
4065
4066static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4067                     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4068                     enum mlx4_steer_type type, u64 *reg_id)
4069{
4070        switch (dev->caps.steering_mode) {
4071        case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4072                int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4073                if (port < 0)
4074                        return port;
4075                return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4076                                                block_loopback, prot,
4077                                                reg_id);
4078        }
4079        case MLX4_STEERING_MODE_B0:
4080                if (prot == MLX4_PROT_ETH) {
4081                        int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4082                        if (port < 0)
4083                                return port;
4084                        gid[5] = port;
4085                }
4086                return mlx4_qp_attach_common(dev, qp, gid,
4087                                            block_loopback, prot, type);
4088        default:
4089                return -EINVAL;
4090        }
4091}
4092
4093static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4094                     u8 gid[16], enum mlx4_protocol prot,
4095                     enum mlx4_steer_type type, u64 reg_id)
4096{
4097        switch (dev->caps.steering_mode) {
4098        case MLX4_STEERING_MODE_DEVICE_MANAGED:
4099                return mlx4_flow_detach(dev, reg_id);
4100        case MLX4_STEERING_MODE_B0:
4101                return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4102        default:
4103                return -EINVAL;
4104        }
4105}
4106
4107static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4108                            u8 *gid, enum mlx4_protocol prot)
4109{
4110        int real_port;
4111
4112        if (prot != MLX4_PROT_ETH)
4113                return 0;
4114
4115        if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4116            dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4117                real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4118                if (real_port < 0)
4119                        return -EINVAL;
4120                gid[5] = real_port;
4121        }
4122
4123        return 0;
4124}
4125
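/*
 * Mailbox/modifier layout assumed by the attach/detach wrapper below: the
 * inbox holds a 16-byte GID (for Ethernet rules byte 5 carries the port
 * and bit 1 of byte 7 the steering type), while the in_modifier packs the
 * QPN in bits 0-23, the protocol in bits 28-30 and the block-loopback
 * flag in bit 31.
 */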
4126int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4127                               struct mlx4_vhcr *vhcr,
4128                               struct mlx4_cmd_mailbox *inbox,
4129                               struct mlx4_cmd_mailbox *outbox,
4130                               struct mlx4_cmd_info *cmd)
4131{
4132        struct mlx4_qp qp; /* dummy for calling attach/detach */
4133        u8 *gid = inbox->buf;
4134        enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4135        int err;
4136        int qpn;
4137        struct res_qp *rqp;
4138        u64 reg_id = 0;
4139        int attach = vhcr->op_modifier;
4140        int block_loopback = vhcr->in_modifier >> 31;
4141        u8 steer_type_mask = 2;
4142        enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4143
4144        qpn = vhcr->in_modifier & 0xffffff;
4145        err = get_res(dev, slave, qpn, RES_QP, &rqp);
4146        if (err)
4147                return err;
4148
4149        qp.qpn = qpn;
4150        if (attach) {
4151                err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4152                                type, &reg_id);
4153                if (err) {
4154                        pr_err("Failed to attach rule to qp 0x%x\n", qpn);
4155                        goto ex_put;
4156                }
4157                err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4158                if (err)
4159                        goto ex_detach;
4160        } else {
4161                err = mlx4_adjust_port(dev, slave, gid, prot);
4162                if (err)
4163                        goto ex_put;
4164
4165                err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4166                if (err)
4167                        goto ex_put;
4168
4169                err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4170                if (err)
4171                        pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
4172                               qpn, reg_id);
4173        }
4174        put_res(dev, slave, qpn, RES_QP);
4175        return err;
4176
4177ex_detach:
4178        qp_detach(dev, &qp, gid, prot, type, reg_id);
4179ex_put:
4180        put_res(dev, slave, qpn, RES_QP);
4181        return err;
4182}
4183
4184/*
4185 * MAC validation for Flow Steering rules.
4186 * A VF can attach rules only with a MAC address that is assigned to it.
4187 */
4188static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4189                                   struct list_head *rlist)
4190{
4191        struct mac_res *res, *tmp;
4192        __be64 be_mac;
4193
4194        /* make sure it isn't a multicast or broadcast MAC */
4195        if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4196            !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4197                list_for_each_entry_safe(res, tmp, rlist, list) {
4198                        be_mac = cpu_to_be64(res->mac << 16);
4199                        if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4200                                return 0;
4201                }
4202                pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
4203                       eth_header->eth.dst_mac, slave);
4204                return -EINVAL;
4205        }
4206        return 0;
4207}
4208
4209/*
4210 * In case the eth header is missing, insert an eth header with a MAC
4211 * address assigned to the VF.
4212 */
4213static int add_eth_header(struct mlx4_dev *dev, int slave,
4214                          struct mlx4_cmd_mailbox *inbox,
4215                          struct list_head *rlist, int header_id)
4216{
4217        struct mac_res *res, *tmp;
4218        u8 port;
4219        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4220        struct mlx4_net_trans_rule_hw_eth *eth_header;
4221        struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4222        struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4223        __be64 be_mac = 0;
4224        __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4225
4226        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4227        port = ctrl->port;
4228        eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4229
4230        /* Clear a space in the inbox for eth header */
4231        switch (header_id) {
4232        case MLX4_NET_TRANS_RULE_ID_IPV4:
4233                ip_header =
4234                        (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4235                memmove(ip_header, eth_header,
4236                        sizeof(*ip_header) + sizeof(*l4_header));
4237                break;
4238        case MLX4_NET_TRANS_RULE_ID_TCP:
4239        case MLX4_NET_TRANS_RULE_ID_UDP:
4240                l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4241                            (eth_header + 1);
4242                memmove(l4_header, eth_header, sizeof(*l4_header));
4243                break;
4244        default:
4245                return -EINVAL;
4246        }
4247        list_for_each_entry_safe(res, tmp, rlist, list) {
4248                if (port == res->port) {
4249                        be_mac = cpu_to_be64(res->mac << 16);
4250                        break;
4251                }
4252        }
4253        if (!be_mac) {
4254                pr_err("Failed to add eth header to FS rule: can't find a matching MAC for port %d\n",
4255                       port);
4256                return -EINVAL;
4257        }
4258
4259        memset(eth_header, 0, sizeof(*eth_header));
4260        eth_header->size = sizeof(*eth_header) >> 2;
4261        eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4262        memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4263        memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4264
4265        return 0;
4267}
4268
4269#define MLX4_UPD_QP_PATH_MASK_SUPPORTED      (                                \
4270        1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX                     |\
4271        1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4272int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4273                           struct mlx4_vhcr *vhcr,
4274                           struct mlx4_cmd_mailbox *inbox,
4275                           struct mlx4_cmd_mailbox *outbox,
4276                           struct mlx4_cmd_info *cmd_info)
4277{
4278        int err;
4279        u32 qpn = vhcr->in_modifier & 0xffffff;
4280        struct res_qp *rqp;
4281        u64 mac;
4282        unsigned port;
4283        u64 pri_addr_path_mask;
4284        struct mlx4_update_qp_context *cmd;
4285        int smac_index;
4286
4287        cmd = (struct mlx4_update_qp_context *)inbox->buf;
4288
4289        pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4290        if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4291            (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4292                return -EPERM;
4293
4294        if ((pri_addr_path_mask &
4295             (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4296                !(dev->caps.flags2 &
4297                  MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4298                mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4299                          slave);
4300                return -EOPNOTSUPP;
4301        }
4302
4303        /* Just change the smac for the QP */
4304        err = get_res(dev, slave, qpn, RES_QP, &rqp);
4305        if (err) {
4306                mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4307                return err;
4308        }
4309
4310        port = (rqp->sched_queue >> 6 & 1) + 1;
4311
4312        if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4313                smac_index = cmd->qp_context.pri_path.grh_mylmc;
4314                err = mac_find_smac_ix_in_slave(dev, slave, port,
4315                                                smac_index, &mac);
4316
4317                if (err) {
4318                        mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4319                                 qpn, smac_index);
4320                        goto err_mac;
4321                }
4322        }
4323
4324        err = mlx4_cmd(dev, inbox->dma,
4325                       vhcr->in_modifier, 0,
4326                       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4327                       MLX4_CMD_NATIVE);
4328        if (err) {
4329                mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4330                goto err_mac;
4331        }
4332
4333err_mac:
4334        put_res(dev, slave, qpn, RES_QP);
4335        return err;
4336}
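
/*
 * mlx4_UPDATE_QP_wrapper() above only lets a slave touch the MAC index
 * and the ETH src-check-MC-loopback bit of the primary path (see
 * MLX4_UPD_QP_PATH_MASK_SUPPORTED); any other qp_mask or path-mask bit is
 * rejected, and a new MAC index must resolve to a MAC owned by the slave.
 */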
4337
4338static u32 qp_attach_mbox_size(void *mbox)
4339{
4340        u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4341        struct _rule_hw  *rule_header;
4342
4343        rule_header = (struct _rule_hw *)(mbox + size);
4344
4345        while (rule_header->size) {
4346                size += rule_header->size * sizeof(u32);
4347                rule_header += 1;
4348        }
4349        return size;
4350}
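
/*
 * qp_attach_mbox_size() above walks the chain of _rule_hw headers that
 * follows the control segment; each header advertises its own length in
 * 32-bit words in its size field, and a zero size terminates the chain.
 * The result sizes the copy kept for the bonded-port mirror rule below.
 */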
4351
4352static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4353
4354int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4355                                         struct mlx4_vhcr *vhcr,
4356                                         struct mlx4_cmd_mailbox *inbox,
4357                                         struct mlx4_cmd_mailbox *outbox,
4358                                         struct mlx4_cmd_info *cmd)
4359{
4361        struct mlx4_priv *priv = mlx4_priv(dev);
4362        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4363        struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4364        int err;
4365        int qpn;
4366        struct res_qp *rqp;
4367        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4368        struct _rule_hw  *rule_header;
4369        int header_id;
4370        struct res_fs_rule *rrule;
4371        u32 mbox_size;
4372
4373        if (dev->caps.steering_mode !=
4374            MLX4_STEERING_MODE_DEVICE_MANAGED)
4375                return -EOPNOTSUPP;
4376
4377        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4378        err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4379        if (err <= 0)
4380                return -EINVAL;
4381        ctrl->port = err;
4382        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4383        err = get_res(dev, slave, qpn, RES_QP, &rqp);
4384        if (err) {
4385                pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4386                return err;
4387        }
4388        rule_header = (struct _rule_hw *)(ctrl + 1);
4389        header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4390
4391        if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4392                mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
4393
4394        switch (header_id) {
4395        case MLX4_NET_TRANS_RULE_ID_ETH:
4396                if (validate_eth_header_mac(slave, rule_header, rlist)) {
4397                        err = -EINVAL;
4398                        goto err_put_qp;
4399                }
4400                break;
4401        case MLX4_NET_TRANS_RULE_ID_IB:
4402                break;
4403        case MLX4_NET_TRANS_RULE_ID_IPV4:
4404        case MLX4_NET_TRANS_RULE_ID_TCP:
4405        case MLX4_NET_TRANS_RULE_ID_UDP:
4406                pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4407                if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4408                        err = -EINVAL;
4409                        goto err_put_qp;
4410                }
4411                vhcr->in_modifier +=
4412                        sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4413                break;
4414        default:
4415                pr_err("Corrupted mailbox\n");
4416                err = -EINVAL;
4417                goto err_put_qp;
4418        }
4419
4420        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4421                           vhcr->in_modifier, 0,
4422                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4423                           MLX4_CMD_NATIVE);
4424        if (err)
4425                goto err_put_qp;
4426
4428        err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4429        if (err) {
4430                mlx4_err(dev, "Failed to add flow steering resources\n");
4431                goto err_detach;
4432        }
4433
4434        err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4435        if (err)
4436                goto err_detach;
4437
4438        mbox_size = qp_attach_mbox_size(inbox->buf);
4439        rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4440        if (!rrule->mirr_mbox) {
4441                err = -ENOMEM;
4442                goto err_put_rule;
4443        }
4444        rrule->mirr_mbox_size = mbox_size;
4445        rrule->mirr_rule_id = 0;
4446        memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4447
4448        /* point the mirror rule at the other port */
4449        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4450        if (ctrl->port == 1)
4451                ctrl->port = 2;
4452        else
4453                ctrl->port = 1;
4454
4455        if (mlx4_is_bonded(dev))
4456                mlx4_do_mirror_rule(dev, rrule);
4457
4458        atomic_inc(&rqp->ref_count);
4459
4460err_put_rule:
4461        put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4462err_detach:
4463        /* detach rule on error */
4464        if (err)
4465                mlx4_cmd(dev, vhcr->out_param, 0, 0,
4466                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4467                         MLX4_CMD_NATIVE);
4468err_put_qp:
4469        put_res(dev, slave, qpn, RES_QP);
4470        return err;
4471}
4472
4473static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4474{
4475        int err;
4476
4477        err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4478        if (err) {
4479                mlx4_err(dev, "Failed to remove flow steering resources\n");
4480                return err;
4481        }
4482
4483        mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4484                 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4485        return 0;
4486}
4487
4488int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4489                                         struct mlx4_vhcr *vhcr,
4490                                         struct mlx4_cmd_mailbox *inbox,
4491                                         struct mlx4_cmd_mailbox *outbox,
4492                                         struct mlx4_cmd_info *cmd)
4493{
4494        int err;
4495        struct res_qp *rqp;
4496        struct res_fs_rule *rrule;
4497        u64 mirr_reg_id;
4498        int qpn;
4499
4500        if (dev->caps.steering_mode !=
4501            MLX4_STEERING_MODE_DEVICE_MANAGED)
4502                return -EOPNOTSUPP;
4503
4504        err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4505        if (err)
4506                return err;
4507
4508        if (!rrule->mirr_mbox) {
4509                mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4510                put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4511                return -EINVAL;
4512        }
4513        mirr_reg_id = rrule->mirr_rule_id;
4514        kfree(rrule->mirr_mbox);
4515        qpn = rrule->qpn;
4516
4517        /* Release the rule from busy state before removal */
4518        put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4519        err = get_res(dev, slave, qpn, RES_QP, &rqp);
4520        if (err)
4521                return err;
4522
4523        if (mirr_reg_id && mlx4_is_bonded(dev)) {
4524                err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4525                if (err) {
4526                        mlx4_err(dev, "Failed to get resource of mirror rule\n");
4527                } else {
4528                        put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4529                        mlx4_undo_mirror_rule(dev, rrule);
4530                }
4531        }
4532        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4533        if (err) {
4534                mlx4_err(dev, "Failed to remove flow steering resources\n");
4535                goto out;
4536        }
4537
4538        err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4539                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4540                       MLX4_CMD_NATIVE);
4541        if (!err)
4542                atomic_dec(&rqp->ref_count);
4543out:
4544        put_res(dev, slave, qpn, RES_QP);
4545        return err;
4546}
4547
4548enum {
4549        BUSY_MAX_RETRIES = 10
4550};
4551
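    /*
     * Slave wrapper for QUERY_IF_STAT: forward the command only if the
     * counter index really belongs to this slave.
     */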
4552int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4553                               struct mlx4_vhcr *vhcr,
4554                               struct mlx4_cmd_mailbox *inbox,
4555                               struct mlx4_cmd_mailbox *outbox,
4556                               struct mlx4_cmd_info *cmd)
4557{
4558        int err;
4559        int index = vhcr->in_modifier & 0xffff;
4560
4561        err = get_res(dev, slave, index, RES_COUNTER, NULL);
4562        if (err)
4563                return err;
4564
4565        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4566        put_res(dev, slave, index, RES_COUNTER);
4567        return err;
4568}
4569
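    /*
     * Detach a QP from every multicast/steering group it is still attached
     * to, freeing the per-attachment bookkeeping as we go.
     */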
4570static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4571{
4572        struct res_gid *rgid;
4573        struct res_gid *tmp;
4574        struct mlx4_qp qp; /* dummy for calling attach/detach */
4575
4576        list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4577                switch (dev->caps.steering_mode) {
4578                case MLX4_STEERING_MODE_DEVICE_MANAGED:
4579                        mlx4_flow_detach(dev, rgid->reg_id);
4580                        break;
4581                case MLX4_STEERING_MODE_B0:
4582                        qp.qpn = rqp->local_qpn;
4583                        (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4584                                                     rgid->prot, rgid->steer);
4585                        break;
4586                }
4587                list_del(&rgid->list);
4588                kfree(rgid);
4589        }
4590}
4591
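    /*
     * Single pass over a slave's resources of the given type: mark every
     * entry that is not already busy as busy-and-removing, and count the
     * entries that could not be claimed.  Returns the number still busy.
     */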
4592static int _move_all_busy(struct mlx4_dev *dev, int slave,
4593                          enum mlx4_resource type, int print)
4594{
4595        struct mlx4_priv *priv = mlx4_priv(dev);
4596        struct mlx4_resource_tracker *tracker =
4597                &priv->mfunc.master.res_tracker;
4598        struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4599        struct res_common *r;
4600        struct res_common *tmp;
4601        int busy;
4602
4603        busy = 0;
4604        spin_lock_irq(mlx4_tlock(dev));
4605        list_for_each_entry_safe(r, tmp, rlist, list) {
4606                if (r->owner == slave) {
4607                        if (!r->removing) {
4608                                if (r->state == RES_ANY_BUSY) {
4609                                        if (print)
4610                                                mlx4_dbg(dev,
4611                                                         "%s id 0x%llx is busy\n",
4612                                                          resource_str(type),
4613                                                          r->res_id);
4614                                        ++busy;
4615                                } else {
4616                                        r->from_state = r->state;
4617                                        r->state = RES_ANY_BUSY;
4618                                        r->removing = 1;
4619                                }
4620                        }
4621                }
4622        }
4623        spin_unlock_irq(mlx4_tlock(dev));
4624
4625        return busy;
4626}
4627
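    /*
     * Repeatedly try to claim all of a slave's resources of the given type
     * for removal, giving up after roughly five seconds and logging whatever
     * is still busy on the final pass.
     */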
4628static int move_all_busy(struct mlx4_dev *dev, int slave,
4629                         enum mlx4_resource type)
4630{
4631        unsigned long begin;
4632        int busy;
4633
4634        begin = jiffies;
4635        do {
4636                busy = _move_all_busy(dev, slave, type, 0);
4637                if (time_after(jiffies, begin + 5 * HZ))
4638                        break;
4639                if (busy)
4640                        cond_resched();
4641        } while (busy);
4642
4643        if (busy)
4644                busy = _move_all_busy(dev, slave, type, 1);
4645
4646        return busy;
4647}
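
    /*
     * Release every QP still owned by the slave, unwinding each one from its
     * current state (HW -> MAPPED -> RESERVED) back to free and dropping the
     * references it holds on its CQs, MTT and SRQ.
     */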
4648static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4649{
4650        struct mlx4_priv *priv = mlx4_priv(dev);
4651        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4652        struct list_head *qp_list =
4653                &tracker->slave_list[slave].res_list[RES_QP];
4654        struct res_qp *qp;
4655        struct res_qp *tmp;
4656        int state;
4657        u64 in_param;
4658        int qpn;
4659        int err;
4660
4661        err = move_all_busy(dev, slave, RES_QP);
4662        if (err)
4663                mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4664                          slave);
4665
4666        spin_lock_irq(mlx4_tlock(dev));
4667        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4668                spin_unlock_irq(mlx4_tlock(dev));
4669                if (qp->com.owner == slave) {
4670                        qpn = qp->com.res_id;
4671                        detach_qp(dev, slave, qp);
4672                        state = qp->com.from_state;
4673                        while (state != 0) {
4674                                switch (state) {
4675                                case RES_QP_RESERVED:
4676                                        spin_lock_irq(mlx4_tlock(dev));
4677                                        rb_erase(&qp->com.node,
4678                                                 &tracker->res_tree[RES_QP]);
4679                                        list_del(&qp->com.list);
4680                                        spin_unlock_irq(mlx4_tlock(dev));
4681                                        if (!valid_reserved(dev, slave, qpn)) {
4682                                                __mlx4_qp_release_range(dev, qpn, 1);
4683                                                mlx4_release_resource(dev, slave,
4684                                                                      RES_QP, 1, 0);
4685                                        }
4686                                        kfree(qp);
4687                                        state = 0;
4688                                        break;
4689                                case RES_QP_MAPPED:
4690                                        if (!valid_reserved(dev, slave, qpn))
4691                                                __mlx4_qp_free_icm(dev, qpn);
4692                                        state = RES_QP_RESERVED;
4693                                        break;
4694                                case RES_QP_HW:
4695                                        in_param = slave;
4696                                        err = mlx4_cmd(dev, in_param,
4697                                                       qp->local_qpn, 2,
4698                                                       MLX4_CMD_2RST_QP,
4699                                                       MLX4_CMD_TIME_CLASS_A,
4700                                                       MLX4_CMD_NATIVE);
4701                                        if (err)
4702                                                mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4703                                                         slave, qp->local_qpn);
4704                                        atomic_dec(&qp->rcq->ref_count);
4705                                        atomic_dec(&qp->scq->ref_count);
4706                                        atomic_dec(&qp->mtt->ref_count);
4707                                        if (qp->srq)
4708                                                atomic_dec(&qp->srq->ref_count);
4709                                        state = RES_QP_MAPPED;
4710                                        break;
4711                                default:
4712                                        state = 0;
4713                                }
4714                        }
4715                }
4716                spin_lock_irq(mlx4_tlock(dev));
4717        }
4718        spin_unlock_irq(mlx4_tlock(dev));
4719}
4720
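    /*
     * Release every SRQ still owned by the slave, moving HW-owned SRQs back
     * to SW ownership before freeing their ICM and tracker entries.
     */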
4721static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4722{
4723        struct mlx4_priv *priv = mlx4_priv(dev);
4724        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4725        struct list_head *srq_list =
4726                &tracker->slave_list[slave].res_list[RES_SRQ];
4727        struct res_srq *srq;
4728        struct res_srq *tmp;
4729        int state;
4730        u64 in_param;
4731        LIST_HEAD(tlist);
4732        int srqn;
4733        int err;
4734
4735        err = move_all_busy(dev, slave, RES_SRQ);
4736        if (err)
4737                mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4738                          slave);
4739
4740        spin_lock_irq(mlx4_tlock(dev));
4741        list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4742                spin_unlock_irq(mlx4_tlock(dev));
4743                if (srq->com.owner == slave) {
4744                        srqn = srq->com.res_id;
4745                        state = srq->com.from_state;
4746                        while (state != 0) {
4747                                switch (state) {
4748                                case RES_SRQ_ALLOCATED:
4749                                        __mlx4_srq_free_icm(dev, srqn);
4750                                        spin_lock_irq(mlx4_tlock(dev));
4751                                        rb_erase(&srq->com.node,
4752                                                 &tracker->res_tree[RES_SRQ]);
4753                                        list_del(&srq->com.list);
4754                                        spin_unlock_irq(mlx4_tlock(dev));
4755                                        mlx4_release_resource(dev, slave,
4756                                                              RES_SRQ, 1, 0);
4757                                        kfree(srq);
4758                                        state = 0;
4759                                        break;
4760
4761                                case RES_SRQ_HW:
4762                                        in_param = slave;
4763                                        err = mlx4_cmd(dev, in_param, srqn, 1,
4764                                                       MLX4_CMD_HW2SW_SRQ,
4765                                                       MLX4_CMD_TIME_CLASS_A,
4766                                                       MLX4_CMD_NATIVE);
4767                                        if (err)
4768                                                mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4769                                                         slave, srqn);
4770
4771                                        atomic_dec(&srq->mtt->ref_count);
4772                                        if (srq->cq)
4773                                                atomic_dec(&srq->cq->ref_count);
4774                                        state = RES_SRQ_ALLOCATED;
4775                                        break;
4776
4777                                default:
4778                                        state = 0;
4779                                }
4780                        }
4781                }
4782                spin_lock_irq(mlx4_tlock(dev));
4783        }
4784        spin_unlock_irq(mlx4_tlock(dev));
4785}
4786
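    /*
     * Release every CQ still owned by the slave.  CQs that are still
     * referenced by QPs or SRQs are skipped; those references are dropped by
     * the QP and SRQ cleanup paths.
     */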
4787static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4788{
4789        struct mlx4_priv *priv = mlx4_priv(dev);
4790        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4791        struct list_head *cq_list =
4792                &tracker->slave_list[slave].res_list[RES_CQ];
4793        struct res_cq *cq;
4794        struct res_cq *tmp;
4795        int state;
4796        u64 in_param;
4797        LIST_HEAD(tlist);
4798        int cqn;
4799        int err;
4800
4801        err = move_all_busy(dev, slave, RES_CQ);
4802        if (err)
4803                mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4804                          slave);
4805
4806        spin_lock_irq(mlx4_tlock(dev));
4807        list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4808                spin_unlock_irq(mlx4_tlock(dev));
4809                if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4810                        cqn = cq->com.res_id;
4811                        state = cq->com.from_state;
4812                        while (state != 0) {
4813                                switch (state) {
4814                                case RES_CQ_ALLOCATED:
4815                                        __mlx4_cq_free_icm(dev, cqn);
4816                                        spin_lock_irq(mlx4_tlock(dev));
4817                                        rb_erase(&cq->com.node,
4818                                                 &tracker->res_tree[RES_CQ]);
4819                                        list_del(&cq->com.list);
4820                                        spin_unlock_irq(mlx4_tlock(dev));
4821                                        mlx4_release_resource(dev, slave,
4822                                                              RES_CQ, 1, 0);
4823                                        kfree(cq);
4824                                        state = 0;
4825                                        break;
4826
4827                                case RES_CQ_HW:
4828                                        in_param = slave;
4829                                        err = mlx4_cmd(dev, in_param, cqn, 1,
4830                                                       MLX4_CMD_HW2SW_CQ,
4831                                                       MLX4_CMD_TIME_CLASS_A,
4832                                                       MLX4_CMD_NATIVE);
4833                                        if (err)
4834                                                mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4835                                                         slave, cqn);
4836                                        atomic_dec(&cq->mtt->ref_count);
4837                                        state = RES_CQ_ALLOCATED;
4838                                        break;
4839
4840                                default:
4841                                        state = 0;
4842                                }
4843                        }
4844                }
4845                spin_lock_irq(mlx4_tlock(dev));
4846        }
4847        spin_unlock_irq(mlx4_tlock(dev));
4848}
4849
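    /*
     * Release every memory region (MPT) still owned by the slave, moving
     * HW-owned entries back to SW ownership and freeing their ICM and keys.
     */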
4850static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4851{
4852        struct mlx4_priv *priv = mlx4_priv(dev);
4853        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4854        struct list_head *mpt_list =
4855                &tracker->slave_list[slave].res_list[RES_MPT];
4856        struct res_mpt *mpt;
4857        struct res_mpt *tmp;
4858        int state;
4859        u64 in_param;
4860        LIST_HEAD(tlist);
4861        int mptn;
4862        int err;
4863
4864        err = move_all_busy(dev, slave, RES_MPT);
4865        if (err)
4866                mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4867                          slave);
4868
4869        spin_lock_irq(mlx4_tlock(dev));
4870        list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4871                spin_unlock_irq(mlx4_tlock(dev));
4872                if (mpt->com.owner == slave) {
4873                        mptn = mpt->com.res_id;
4874                        state = mpt->com.from_state;
4875                        while (state != 0) {
4876                                switch (state) {
4877                                case RES_MPT_RESERVED:
4878                                        __mlx4_mpt_release(dev, mpt->key);
4879                                        spin_lock_irq(mlx4_tlock(dev));
4880                                        rb_erase(&mpt->com.node,
4881                                                 &tracker->res_tree[RES_MPT]);
4882                                        list_del(&mpt->com.list);
4883                                        spin_unlock_irq(mlx4_tlock(dev));
4884                                        mlx4_release_resource(dev, slave,
4885                                                              RES_MPT, 1, 0);
4886                                        kfree(mpt);
4887                                        state = 0;
4888                                        break;
4889
4890                                case RES_MPT_MAPPED:
4891                                        __mlx4_mpt_free_icm(dev, mpt->key);
4892                                        state = RES_MPT_RESERVED;
4893                                        break;
4894
4895                                case RES_MPT_HW:
4896                                        in_param = slave;
4897                                        err = mlx4_cmd(dev, in_param, mptn, 0,
4898                                                     MLX4_CMD_HW2SW_MPT,
4899                                                     MLX4_CMD_TIME_CLASS_A,
4900                                                     MLX4_CMD_NATIVE);
4901                                        if (err)
4902                                                mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4903                                                         slave, mptn);
4904                                        if (mpt->mtt)
4905                                                atomic_dec(&mpt->mtt->ref_count);
4906                                        state = RES_MPT_MAPPED;
4907                                        break;
4908                                default:
4909                                        state = 0;
4910                                }
4911                        }
4912                }
4913                spin_lock_irq(mlx4_tlock(dev));
4914        }
4915        spin_unlock_irq(mlx4_tlock(dev));
4916}
4917
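    /*
     * Release every MTT range still owned by the slave and return the
     * corresponding quota to the resource tracker.
     */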
4918static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4919{
4920        struct mlx4_priv *priv = mlx4_priv(dev);
4921        struct mlx4_resource_tracker *tracker =
4922                &priv->mfunc.master.res_tracker;
4923        struct list_head *mtt_list =
4924                &tracker->slave_list[slave].res_list[RES_MTT];
4925        struct res_mtt *mtt;
4926        struct res_mtt *tmp;
4927        int state;
4928        LIST_HEAD(tlist);
4929        int base;
4930        int err;
4931
4932        err = move_all_busy(dev, slave, RES_MTT);
4933        if (err)
4934                mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4935                          slave);
4936
4937        spin_lock_irq(mlx4_tlock(dev));
4938        list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4939                spin_unlock_irq(mlx4_tlock(dev));
4940                if (mtt->com.owner == slave) {
4941                        base = mtt->com.res_id;
4942                        state = mtt->com.from_state;
4943                        while (state != 0) {
4944                                switch (state) {
4945                                case RES_MTT_ALLOCATED:
4946                                        __mlx4_free_mtt_range(dev, base,
4947                                                              mtt->order);
4948                                        spin_lock_irq(mlx4_tlock(dev));
4949                                        rb_erase(&mtt->com.node,
4950                                                 &tracker->res_tree[RES_MTT]);
4951                                        list_del(&mtt->com.list);
4952                                        spin_unlock_irq(mlx4_tlock(dev));
4953                                        mlx4_release_resource(dev, slave, RES_MTT,
4954                                                              1 << mtt->order, 0);
4955                                        kfree(mtt);
4956                                        state = 0;
4957                                        break;
4958
4959                                default:
4960                                        state = 0;
4961                                }
4962                        }
4963                }
4964                spin_lock_irq(mlx4_tlock(dev));
4965        }
4966        spin_unlock_irq(mlx4_tlock(dev));
4967}
4968
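    /*
     * Re-post the saved attach mailbox of a flow steering rule so that an
     * identical rule exists on the other port, and track the new rule.  The
     * original rule records the mirror's reg_id in mirr_rule_id; the mirror
     * itself carries no saved mailbox, which is how it is told apart from
     * user-created rules.
     */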
4969static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4970{
4971        struct mlx4_cmd_mailbox *mailbox;
4972        int err;
4973        struct res_fs_rule *mirr_rule;
4974        u64 reg_id;
4975
4976        mailbox = mlx4_alloc_cmd_mailbox(dev);
4977        if (IS_ERR(mailbox))
4978                return PTR_ERR(mailbox);
4979
4980        if (!fs_rule->mirr_mbox) {
4981                mlx4_err(dev, "rule mirroring mailbox is null\n");
                    mlx4_free_cmd_mailbox(dev, mailbox);
4982                return -EINVAL;
4983        }
4984        memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4985        err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4986                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4987                           MLX4_CMD_NATIVE);
4988        mlx4_free_cmd_mailbox(dev, mailbox);
4989
4990        if (err)
4991                goto err;
4992
4993        err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4994        if (err)
4995                goto err_detach;
4996
4997        err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
4998        if (err)
4999                goto err_rem;
5000
5001        fs_rule->mirr_rule_id = reg_id;
5002        mirr_rule->mirr_rule_id = 0;
5003        mirr_rule->mirr_mbox_size = 0;
5004        mirr_rule->mirr_mbox = NULL;
5005        put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
5006
5007        return 0;
5008err_rem:
5009        rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
5010err_detach:
5011        mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
5012                 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
5013err:
5014        return err;
5015}
5016
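    /*
     * On bond or unbond, walk every tracked flow steering rule and either
     * create a mirror copy on the other port (bond) or remove the existing
     * mirror copies (unbond).  Returns 0 if every rule was handled,
     * otherwise an error value accumulated from the individual failures.
     */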
5017static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
5018{
5019        struct mlx4_priv *priv = mlx4_priv(dev);
5020        struct mlx4_resource_tracker *tracker =
5021                &priv->mfunc.master.res_tracker;
5022        struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
5023        struct rb_node *p;
5024        struct res_fs_rule *fs_rule;
5025        int err = 0;
5026        LIST_HEAD(mirr_list);
5027
5028        for (p = rb_first(root); p; p = rb_next(p)) {
5029                fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5030                if ((bond && fs_rule->mirr_mbox_size) ||
5031                    (!bond && !fs_rule->mirr_mbox_size))
5032                        list_add_tail(&fs_rule->mirr_list, &mirr_list);
5033        }
5034
5035        list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5036                if (bond)
5037                        err += mlx4_do_mirror_rule(dev, fs_rule);
5038                else
5039                        err += mlx4_undo_mirror_rule(dev, fs_rule);
5040        }
5041        return err;
5042}
5043
5044int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5045{
5046        return mlx4_mirror_fs_rules(dev, true);
5047}
5048
5049int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5050{
5051        return mlx4_mirror_fs_rules(dev, false);
5052}
5053
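    /*
     * Detach and free every flow steering rule still owned by the slave.
     */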
5054static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5055{
5056        struct mlx4_priv *priv = mlx4_priv(dev);
5057        struct mlx4_resource_tracker *tracker =
5058                &priv->mfunc.master.res_tracker;
5059        struct list_head *fs_rule_list =
5060                &tracker->slave_list[slave].res_list[RES_FS_RULE];
5061        struct res_fs_rule *fs_rule;
5062        struct res_fs_rule *tmp;
5063        int state;
5064        u64 base;
5065        int err;
5066
5067        err = move_all_busy(dev, slave, RES_FS_RULE);
5068        if (err)
5069                mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
5070                          slave);
5071
5072        spin_lock_irq(mlx4_tlock(dev));
5073        list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5074                spin_unlock_irq(mlx4_tlock(dev));
5075                if (fs_rule->com.owner == slave) {
5076                        base = fs_rule->com.res_id;
5077                        state = fs_rule->com.from_state;
5078                        while (state != 0) {
5079                                switch (state) {
5080                                case RES_FS_RULE_ALLOCATED:
5081                                        /* detach rule */
5082                                        err = mlx4_cmd(dev, base, 0, 0,
5083                                                       MLX4_QP_FLOW_STEERING_DETACH,
5084                                                       MLX4_CMD_TIME_CLASS_A,
5085                                                       MLX4_CMD_NATIVE);
5086
5087                                        spin_lock_irq(mlx4_tlock(dev));
5088                                        rb_erase(&fs_rule->com.node,
5089                                                 &tracker->res_tree[RES_FS_RULE]);
5090                                        list_del(&fs_rule->com.list);
5091                                        spin_unlock_irq(mlx4_tlock(dev));
5092                                        kfree(fs_rule);
5093                                        state = 0;
5094                                        break;
5095
5096                                default:
5097                                        state = 0;
5098                                }
5099                        }
5100                }
5101                spin_lock_irq(mlx4_tlock(dev));
5102        }
5103        spin_unlock_irq(mlx4_tlock(dev));
5104}
5105
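    /*
     * Release every EQ still owned by the slave, returning HW-owned EQs to
     * SW ownership and dropping their MTT references.
     */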
5106static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5107{
5108        struct mlx4_priv *priv = mlx4_priv(dev);
5109        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5110        struct list_head *eq_list =
5111                &tracker->slave_list[slave].res_list[RES_EQ];
5112        struct res_eq *eq;
5113        struct res_eq *tmp;
5114        int err;
5115        int state;
5116        LIST_HEAD(tlist);
5117        int eqn;
5118
5119        err = move_all_busy(dev, slave, RES_EQ);
5120        if (err)
5121                mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5122                          slave);
5123
5124        spin_lock_irq(mlx4_tlock(dev));
5125        list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5126                spin_unlock_irq(mlx4_tlock(dev));
5127                if (eq->com.owner == slave) {
5128                        eqn = eq->com.res_id;
5129                        state = eq->com.from_state;
5130                        while (state != 0) {
5131                                switch (state) {
5132                                case RES_EQ_RESERVED:
5133                                        spin_lock_irq(mlx4_tlock(dev));
5134                                        rb_erase(&eq->com.node,
5135                                                 &tracker->res_tree[RES_EQ]);
5136                                        list_del(&eq->com.list);
5137                                        spin_unlock_irq(mlx4_tlock(dev));
5138                                        kfree(eq);
5139                                        state = 0;
5140                                        break;
5141
5142                                case RES_EQ_HW:
5143                                        err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5144                                                       1, MLX4_CMD_HW2SW_EQ,
5145                                                       MLX4_CMD_TIME_CLASS_A,
5146                                                       MLX4_CMD_NATIVE);
5147                                        if (err)
5148                                                mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5149                                                         slave, eqn & 0x3ff);
5150                                        atomic_dec(&eq->mtt->ref_count);
5151                                        state = RES_EQ_RESERVED;
5152                                        break;
5153
5154                                default:
5155                                        state = 0;
5156                                }
5157                        }
5158                }
5159                spin_lock_irq(mlx4_tlock(dev));
5160        }
5161        spin_unlock_irq(mlx4_tlock(dev));
5162}
5163
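    /*
     * Free every counter still owned by the slave.  Tracker entries are
     * unlinked under the lock and their indices collected into an array so
     * that __mlx4_counter_free() can be called with the lock dropped.
     */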
5164static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5165{
5166        struct mlx4_priv *priv = mlx4_priv(dev);
5167        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5168        struct list_head *counter_list =
5169                &tracker->slave_list[slave].res_list[RES_COUNTER];
5170        struct res_counter *counter;
5171        struct res_counter *tmp;
5172        int err;
5173        int *counters_arr = NULL;
5174        int i, j;
5175
5176        err = move_all_busy(dev, slave, RES_COUNTER);
5177        if (err)
5178                mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5179                          slave);
5180
5181        counters_arr = kmalloc_array(dev->caps.max_counters,
5182                                     sizeof(*counters_arr), GFP_KERNEL);
5183        if (!counters_arr)
5184                return;
5185
5186        do {
5187                i = 0;
5188                j = 0;
5189                spin_lock_irq(mlx4_tlock(dev));
5190                list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5191                        if (counter->com.owner == slave) {
5192                                counters_arr[i++] = counter->com.res_id;
5193                                rb_erase(&counter->com.node,
5194                                         &tracker->res_tree[RES_COUNTER]);
5195                                list_del(&counter->com.list);
5196                                kfree(counter);
5197                        }
5198                }
5199                spin_unlock_irq(mlx4_tlock(dev));
5200
5201                while (j < i) {
5202                        __mlx4_counter_free(dev, counters_arr[j++]);
5203                        mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5204                }
5205        } while (i);
5206
5207        kfree(counters_arr);
5208}
5209
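    /*
     * Free every XRC domain still owned by the slave.
     */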
5210static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5211{
5212        struct mlx4_priv *priv = mlx4_priv(dev);
5213        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5214        struct list_head *xrcdn_list =
5215                &tracker->slave_list[slave].res_list[RES_XRCD];
5216        struct res_xrcdn *xrcd;
5217        struct res_xrcdn *tmp;
5218        int err;
5219        int xrcdn;
5220
5221        err = move_all_busy(dev, slave, RES_XRCD);
5222        if (err)
5223                mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5224                          slave);
5225
5226        spin_lock_irq(mlx4_tlock(dev));
5227        list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5228                if (xrcd->com.owner == slave) {
5229                        xrcdn = xrcd->com.res_id;
5230                        rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5231                        list_del(&xrcd->com.list);
5232                        kfree(xrcd);
5233                        __mlx4_xrcd_free(dev, xrcdn);
5234                }
5235        }
5236        spin_unlock_irq(mlx4_tlock(dev));
5237}
5238
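    /*
     * Master-side cleanup for a slave function, typically run when the slave
     * resets or goes away: walk the resource types in dependency order and
     * release whatever the slave still owns.
     */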
5239void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5240{
5241        struct mlx4_priv *priv = mlx4_priv(dev);
5242        mlx4_reset_roce_gids(dev, slave);
5243        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5244        rem_slave_vlans(dev, slave);
5245        rem_slave_macs(dev, slave);
5246        rem_slave_fs_rule(dev, slave);
5247        rem_slave_qps(dev, slave);
5248        rem_slave_srqs(dev, slave);
5249        rem_slave_cqs(dev, slave);
5250        rem_slave_mrs(dev, slave);
5251        rem_slave_eqs(dev, slave);
5252        rem_slave_mtts(dev, slave);
5253        rem_slave_counters(dev, slave);
5254        rem_slave_xrcdns(dev, slave);
5255        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5256}
5257
5258static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5259                           struct mlx4_vf_immed_vlan_work *work)
5260{
5261        ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5262        ctx->qp_context.qos_vport = work->qos_vport;
5263}
5264
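    /*
     * Work-queue handler that applies an immediate VLAN/QoS change to all of
     * a slave's Ethernet QPs on the given port via UPDATE_QP, then
     * unregisters the previous VLAN if every update succeeded.  Reserved and
     * RSS QPs, and QPs that have not yet gone through INIT2RTR, are skipped.
     */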
5265void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5266{
5267        struct mlx4_vf_immed_vlan_work *work =
5268                container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5269        struct mlx4_cmd_mailbox *mailbox;
5270        struct mlx4_update_qp_context *upd_context;
5271        struct mlx4_dev *dev = &work->priv->dev;
5272        struct mlx4_resource_tracker *tracker =
5273                &work->priv->mfunc.master.res_tracker;
5274        struct list_head *qp_list =
5275                &tracker->slave_list[work->slave].res_list[RES_QP];
5276        struct res_qp *qp;
5277        struct res_qp *tmp;
5278        u64 qp_path_mask_vlan_ctrl =
5279                       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5280                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5281                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5282                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5283                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5284                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5285
5286        u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5287                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5288                       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5289                       (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5290                       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5291                       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5292                       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5293                       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5294
5295        int err;
5296        int port, errors = 0;
5297        u8 vlan_control;
5298
5299        if (mlx4_is_slave(dev)) {
5300                mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5301                          work->slave);
5302                goto out;
5303        }
5304
5305        mailbox = mlx4_alloc_cmd_mailbox(dev);
5306        if (IS_ERR(mailbox))
5307                goto out;
5308        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5309                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5310                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5311                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5312                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5313                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5314                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5315        else if (!work->vlan_id)
5316                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5317                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5318        else if (work->vlan_proto == htons(ETH_P_8021AD))
5319                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5320                        MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5321                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5322                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5323        else  /* vst 802.1Q */
5324                vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5325                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5326                        MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5327
5328        upd_context = mailbox->buf;
5329        upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5330
5331        spin_lock_irq(mlx4_tlock(dev));
5332        list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5333                spin_unlock_irq(mlx4_tlock(dev));
5334                if (qp->com.owner == work->slave) {
5335                        if (qp->com.from_state != RES_QP_HW ||
5336                            !qp->sched_queue ||  /* no INIT2RTR trans yet */
5337                            mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5338                            qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5339                                spin_lock_irq(mlx4_tlock(dev));
5340                                continue;
5341                        }
5342                        port = (qp->sched_queue >> 6 & 1) + 1;
5343                        if (port != work->port) {
5344                                spin_lock_irq(mlx4_tlock(dev));
5345                                continue;
5346                        }
5347                        if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5348                                upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5349                        else
5350                                upd_context->primary_addr_path_mask =
5351                                        cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5352                        if (work->vlan_id == MLX4_VGT) {
5353                                upd_context->qp_context.param3 = qp->param3;
5354                                upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5355                                upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5356                                upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5357                                upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5358                                upd_context->qp_context.pri_path.feup = qp->feup;
5359                                upd_context->qp_context.pri_path.sched_queue =
5360                                        qp->sched_queue;
5361                        } else {
5362                                upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5363                                upd_context->qp_context.pri_path.vlan_control = vlan_control;
5364                                upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5365                                upd_context->qp_context.pri_path.fvl_rx =
5366                                        qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5367                                upd_context->qp_context.pri_path.fl =
5368                                        qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5369                                if (work->vlan_proto == htons(ETH_P_8021AD))
5370                                        upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5371                                else
5372                                        upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5373                                upd_context->qp_context.pri_path.feup =
5374                                        qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5375                                upd_context->qp_context.pri_path.sched_queue =
5376                                        qp->sched_queue & 0xC7;
5377                                upd_context->qp_context.pri_path.sched_queue |=
5378                                        ((work->qos & 0x7) << 3);
5379
5380                                if (dev->caps.flags2 &
5381                                    MLX4_DEV_CAP_FLAG2_QOS_VPP)
5382                                        update_qos_vpp(upd_context, work);
5383                        }
5384
5385                        err = mlx4_cmd(dev, mailbox->dma,
5386                                       qp->local_qpn & 0xffffff,
5387                                       0, MLX4_CMD_UPDATE_QP,
5388                                       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5389                        if (err) {
5390                                mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5391                                          work->slave, port, qp->local_qpn, err);
5392                                errors++;
5393                        }
5394                }
5395                spin_lock_irq(mlx4_tlock(dev));
5396        }
5397        spin_unlock_irq(mlx4_tlock(dev));
5398        mlx4_free_cmd_mailbox(dev, mailbox);
5399
5400        if (errors)
5401                mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5402                         errors, work->slave, work->port);
5403
5404        /* unregister previous vlan_id if needed and we had no errors
5405         * while updating the QPs
5406         */
5407        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5408            NO_INDX != work->orig_vlan_ix)
5409                __mlx4_unregister_vlan(&work->priv->dev, work->port,
5410                                       work->orig_vlan_id);
5411out:
5412        kfree(work);
5413        return;
5414}
5415