linux/drivers/net/ethernet/mellanox/mlx4/qp.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/init.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

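/*
 * Dispatch an async event to a QP's event handler.  A reference is
 * taken under the table lock so the QP cannot be freed while the
 * handler runs; dropping the last reference completes qp->free.
 */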
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
        struct mlx4_qp *qp;

        spin_lock(&qp_table->lock);

        qp = __mlx4_qp_lookup(dev, qpn);
        if (qp)
                atomic_inc(&qp->refcount);

        spin_unlock(&qp_table->lock);

        if (!qp) {
                mlx4_dbg(dev, "Async event for nonexistent QP %08x\n", qpn);
                return;
        }

        qp->event(qp, event_type);

        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
}

/* used for INIT/CLOSE port logic */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
        /* this procedure is called after we already know we are on the master */
        /* qp0 is either the proxy qp0, or the real qp0 */
        u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
        *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;

        *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
                qp->qpn <= dev->phys_caps.base_sqpn + 1;

        return *real_qp0 || *proxy_qp0;
}

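/*
 * Post a QP state-transition command to the firmware.  The op[] table
 * at the top of the function maps (cur_state, new_state) pairs to
 * command opcodes; missing entries are invalid transitions.  A
 * transition to RESET carries no context, so it is issued without a
 * mailbox.
 */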
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                     enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
                     struct mlx4_qp_context *context,
                     enum mlx4_qp_optpar optpar,
                     int sqd_event, struct mlx4_qp *qp, int native)
{
        static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
                [MLX4_QP_STATE_RST] = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_INIT]    = MLX4_CMD_RST2INIT_QP,
                },
                [MLX4_QP_STATE_INIT]  = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_INIT]    = MLX4_CMD_INIT2INIT_QP,
                        [MLX4_QP_STATE_RTR]     = MLX4_CMD_INIT2RTR_QP,
                },
                [MLX4_QP_STATE_RTR]   = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_RTS]     = MLX4_CMD_RTR2RTS_QP,
                },
                [MLX4_QP_STATE_RTS]   = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_RTS]     = MLX4_CMD_RTS2RTS_QP,
                        [MLX4_QP_STATE_SQD]     = MLX4_CMD_RTS2SQD_QP,
                },
                [MLX4_QP_STATE_SQD] = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_RTS]     = MLX4_CMD_SQD2RTS_QP,
                        [MLX4_QP_STATE_SQD]     = MLX4_CMD_SQD2SQD_QP,
                },
                [MLX4_QP_STATE_SQER] = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                        [MLX4_QP_STATE_RTS]     = MLX4_CMD_SQERR2RTS_QP,
                },
                [MLX4_QP_STATE_ERR] = {
                        [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
                        [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
                }
        };

        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int ret = 0;
        int real_qp0 = 0;
        int proxy_qp0 = 0;
        u8 port;

        if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
            !op[cur_state][new_state])
                return -EINVAL;

        if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
                ret = mlx4_cmd(dev, 0, qp->qpn, 2,
                        MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
                if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
                    cur_state != MLX4_QP_STATE_RST &&
                    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
                        port = (qp->qpn & 1) + 1;
                        if (proxy_qp0)
                                priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
                        else
                                priv->mfunc.master.qp0_state[port].qp0_active = 0;
                }
                return ret;
        }

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
                u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
                context->mtt_base_addr_h = mtt_addr >> 32;
                context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
                context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
        }

        *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
        memcpy(mailbox->buf + 8, context, sizeof *context);

        ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
                cpu_to_be32(qp->qpn);

        ret = mlx4_cmd(dev, mailbox->dma,
                       qp->qpn | (!!sqd_event << 31),
                       new_state == MLX4_QP_STATE_RST ? 2 : 0,
                       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

        if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
                port = (qp->qpn & 1) + 1;
                if (cur_state != MLX4_QP_STATE_ERR &&
                    cur_state != MLX4_QP_STATE_RST &&
                    new_state == MLX4_QP_STATE_ERR) {
                        if (proxy_qp0)
                                priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
                        else
                                priv->mfunc.master.qp0_state[port].qp0_active = 0;
                } else if (new_state == MLX4_QP_STATE_RTR) {
                        if (proxy_qp0)
                                priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
                        else
                                priv->mfunc.master.qp0_state[port].qp0_active = 1;
                }
        }

        mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
}

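/* Exported wrapper around __mlx4_qp_modify(); always uses the wrapped
 * (non-native) command path.
 */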
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
                   struct mlx4_qp_context *context,
                   enum mlx4_qp_optpar optpar,
                   int sqd_event, struct mlx4_qp *qp)
{
        return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
                                optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);

int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
                                   int *base)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;

        *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
        if (*base == -1)
                return -ENOMEM;

        return 0;
}

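/*
 * Reserve a contiguous, aligned range of QP numbers.  On a
 * multifunction device the request goes through the ALLOC_RES wrapper
 * so the resource tracker can account for it; otherwise the range is
 * taken directly from the QP bitmap.
 */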
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
{
        u64 in_param = 0;
        u64 out_param;
        int err;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, cnt);
                set_param_h(&in_param, align);
                err = mlx4_cmd_imm(dev, in_param, &out_param,
                                   RES_QP, RES_OP_RESERVE,
                                   MLX4_CMD_ALLOC_RES,
                                   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (err)
                        return err;

                *base = get_param_l(&out_param);
                return 0;
        }
        return __mlx4_qp_reserve_range(dev, cnt, align, base);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);

void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;

        if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
                return;
        mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}

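/*
 * Return a range obtained from mlx4_qp_reserve_range().  Ranges that
 * start in the reserved (special) QP area are never returned to the
 * bitmap.
 */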
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
        u64 in_param = 0;
        int err;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, base_qpn);
                set_param_h(&in_param, cnt);
                err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
                               MLX4_CMD_FREE_RES,
                               MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (err) {
                        mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
                                  base_qpn, cnt);
                }
        } else {
                __mlx4_qp_release_range(dev, base_qpn, cnt);
        }
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);

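/*
 * Map the ICM backing one QP: entries in the qp, auxc, altc, rdmarc
 * and cmpt tables.  On failure, put back the entries that were
 * already taken.
 */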
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
        int err;

        err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
        if (err)
                goto err_out;

        err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
        if (err)
                goto err_put_qp;

        err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
        if (err)
                goto err_put_auxc;

        err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
        if (err)
                goto err_put_altc;

        err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
        if (err)
                goto err_put_rdmarc;

        return 0;

err_put_rdmarc:
        mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
        mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
        mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
        mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
        return err;
}

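/* On a multifunction device, ICM mapping is requested through the
 * wrapped RES_OP_MAP_ICM command; otherwise it is mapped directly.
 */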
static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
        u64 param = 0;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&param, qpn);
                return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
                                    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
                                    MLX4_CMD_WRAPPED);
        }
        return __mlx4_qp_alloc_icm(dev, qpn);
}

void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;

        mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
        mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
        mlx4_table_put(dev, &qp_table->altc_table, qpn);
        mlx4_table_put(dev, &qp_table->auxc_table, qpn);
        mlx4_table_put(dev, &qp_table->qp_table, qpn);
}

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
        u64 in_param = 0;

        if (mlx4_is_mfunc(dev)) {
                set_param_l(&in_param, qpn);
                if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
                             MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
                             MLX4_CMD_WRAPPED))
                        mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
        } else {
                __mlx4_qp_free_icm(dev, qpn);
        }
}

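/*
 * Initialize a software QP: map its ICM and hook it into the radix
 * tree used for async event dispatch.  Typical usage (a sketch; error
 * handling abbreviated):
 *
 *      err = mlx4_qp_reserve_range(dev, 1, 1, &qpn);
 *      if (!err)
 *              err = mlx4_qp_alloc(dev, qpn, &qp);
 */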
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_qp_table *qp_table = &priv->qp_table;
        int err;

        if (!qpn)
                return -EINVAL;

        qp->qpn = qpn;

        err = mlx4_qp_alloc_icm(dev, qpn);
        if (err)
                return err;

        spin_lock_irq(&qp_table->lock);
        err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
                                (dev->caps.num_qps - 1), qp);
        spin_unlock_irq(&qp_table->lock);
        if (err)
                goto err_icm;

        atomic_set(&qp->refcount, 1);
        init_completion(&qp->free);

        return 0;

err_icm:
        mlx4_qp_free_icm(dev, qpn);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);

void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
        unsigned long flags;

        spin_lock_irqsave(&qp_table->lock, flags);
        radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
        spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

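/*
 * Release a QP: drop the initial reference, wait for any event
 * handlers still holding the QP, then free its ICM.  Callers are
 * expected to have removed the QP from the lookup tree first with
 * mlx4_qp_remove().
 */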
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
        wait_for_completion(&qp->free);

        mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);

static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
        return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

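/*
 * Set up the QP table: the lookup tree used by mlx4_qp_event(), the
 * QPN bitmap with its reserved regions, and (on the PF) the special
 * proxy/tunnel QP ranges used under SRIOV.
 */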
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
        struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
        int err;
        int reserved_from_top = 0;
        int k;

        spin_lock_init(&qp_table->lock);
        INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
        if (mlx4_is_slave(dev))
                return 0;

        /*
         * We reserve 2 extra QPs per port for the special QPs.  The
         * block of special QPs must be aligned to a multiple of 8, so
         * round up.
         *
         * We also reserve the MSB of the 24-bit QP number to indicate
         * that a QP is an XRC QP.
         */
        dev->phys_caps.base_sqpn =
                ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);

        {
                int sort[MLX4_NUM_QP_REGION];
                int i, j, tmp;
                int last_base = dev->caps.num_qps;

                for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
                        sort[i] = i;

                for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
                        for (j = 2; j < i; ++j) {
                                if (dev->caps.reserved_qps_cnt[sort[j]] >
                                    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
                                        tmp             = sort[j];
                                        sort[j]         = sort[j - 1];
                                        sort[j - 1]     = tmp;
                                }
                        }
                }

                for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
                        last_base -= dev->caps.reserved_qps_cnt[sort[i]];
                        dev->caps.reserved_qps_base[sort[i]] = last_base;
                        reserved_from_top +=
                                dev->caps.reserved_qps_cnt[sort[i]];
                }
        }

        /* Reserve 8 real SQPs in both native and SRIOV modes.
         * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
         * (for all PFs and VFs), and 8 corresponding tunnel QPs.
         * Each proxy SQP works opposite its own tunnel QP.
         *
         * The QPs are arranged as follows:
         * a. 8 real SQPs
         * b. All the proxy SQPs (8 per function)
         * c. All the tunnel QPs (8 per function)
         */

        err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
                               (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 +
                               16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev),
                               reserved_from_top);
        if (err)
                return err;

        if (mlx4_is_mfunc(dev)) {
                /* for PPF use */
                dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
                dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

                /* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
                 * since the PF does not call mlx4_slave_caps */
                dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
                dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
                dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
                dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

                if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
                    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
                        err = -ENOMEM;
                        goto err_mem;
                }

                for (k = 0; k < dev->caps.num_ports; k++) {
                        dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
                                8 * mlx4_master_func_num(dev) + k;
                        dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
                        dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
                                8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
                        dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
                }
        }

        err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
        if (err)
                goto err_mem;
        return 0;

err_mem:
        kfree(dev->caps.qp0_tunnel);
        kfree(dev->caps.qp0_proxy);
        kfree(dev->caps.qp1_tunnel);
        kfree(dev->caps.qp1_proxy);
        dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
                dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
        return err;
}

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
        if (mlx4_is_slave(dev))
                return;

        mlx4_CONF_SPECIAL_QP(dev, 0);
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}

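/* Read a QP's current hardware context into *context via QUERY_QP. */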
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
                  struct mlx4_qp_context *context)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
                           MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
                           MLX4_CMD_WRAPPED);
        if (!err)
                memcpy(context, mailbox->buf + 8, sizeof *context);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);

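/*
 * Walk a QP from RESET to RTS one transition at a time
 * (RST -> INIT -> RTR -> RTS), patching the target state into
 * context->flags before each step.  *qp_state is updated after each
 * successful transition, so the caller can tear down correctly if a
 * step fails.
 */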
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                     struct mlx4_qp_context *context,
                     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
        int err;
        int i;
        enum mlx4_qp_state states[] = {
                MLX4_QP_STATE_RST,
                MLX4_QP_STATE_INIT,
                MLX4_QP_STATE_RTR,
                MLX4_QP_STATE_RTS
        };

        for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
                context->flags &= cpu_to_be32(~(0xf << 28));
                context->flags |= cpu_to_be32(states[i + 1] << 28);
                err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
                                     context, 0, 0, qp);
                if (err) {
                        mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
                                 states[i + 1], err);
                        return err;
                }

                *qp_state = states[i + 1];
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);