linux/drivers/net/ethernet/mellanox/mlx4/cmd.c
   1/*
   2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
   4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/sched.h>
  36#include <linux/slab.h>
  37#include <linux/export.h>
  38#include <linux/pci.h>
  39#include <linux/errno.h>
  40
  41#include <linux/mlx4/cmd.h>
  42#include <linux/mlx4/device.h>
  43#include <linux/semaphore.h>
  44#include <rdma/ib_smi.h>
  45#include <linux/delay.h>
  46#include <linux/etherdevice.h>
  47
  48#include <asm/io.h>
  49
  50#include "mlx4.h"
  51#include "fw.h"
  52#include "fw_qos.h"
  53#include "mlx4_stats.h"
  54
  55#define CMD_POLL_TOKEN 0xffff
  56#define INBOX_MASK      0xffffffffffffff00ULL
  57
  58#define CMD_CHAN_VER 1
  59#define CMD_CHAN_IF_REV 1
  60
  61enum {
  62        /* command completed successfully: */
  63        CMD_STAT_OK             = 0x00,
  64        /* Internal error (such as a bus error) occurred while processing command: */
  65        CMD_STAT_INTERNAL_ERR   = 0x01,
  66        /* Operation/command not supported or opcode modifier not supported: */
  67        CMD_STAT_BAD_OP         = 0x02,
  68        /* Parameter not supported or parameter out of range: */
  69        CMD_STAT_BAD_PARAM      = 0x03,
  70        /* System not enabled or bad system state: */
  71        CMD_STAT_BAD_SYS_STATE  = 0x04,
  72        /* Attempt to access reserved or unallocated resource: */
  73        CMD_STAT_BAD_RESOURCE   = 0x05,
  74        /* Requested resource is currently executing a command, or is otherwise busy: */
  75        CMD_STAT_RESOURCE_BUSY  = 0x06,
  76        /* Required capability exceeds device limits: */
  77        CMD_STAT_EXCEED_LIM     = 0x08,
  78        /* Resource is not in the appropriate state or ownership: */
  79        CMD_STAT_BAD_RES_STATE  = 0x09,
  80        /* Index out of range: */
  81        CMD_STAT_BAD_INDEX      = 0x0a,
  82        /* FW image corrupted: */
  83        CMD_STAT_BAD_NVMEM      = 0x0b,
  84        /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
  85        CMD_STAT_ICM_ERROR      = 0x0c,
  86        /* Attempt to modify a QP/EE which is not in the presumed state: */
  87        CMD_STAT_BAD_QP_STATE   = 0x10,
  88        /* Bad segment parameters (Address/Size): */
  89        CMD_STAT_BAD_SEG_PARAM  = 0x20,
  90        /* Memory Region has Memory Windows bound to it: */
  91        CMD_STAT_REG_BOUND      = 0x21,
  92        /* HCA local attached memory not present: */
  93        CMD_STAT_LAM_NOT_PRE    = 0x22,
  94        /* Bad management packet (silently discarded): */
  95        CMD_STAT_BAD_PKT        = 0x30,
  96        /* More outstanding CQEs in CQ than new CQ size: */
  97        CMD_STAT_BAD_SIZE       = 0x40,
  98        /* Multi Function device support required: */
  99        CMD_STAT_MULTI_FUNC_REQ = 0x50,
 100};
 101
 102enum {
 103        HCR_IN_PARAM_OFFSET     = 0x00,
 104        HCR_IN_MODIFIER_OFFSET  = 0x08,
 105        HCR_OUT_PARAM_OFFSET    = 0x0c,
 106        HCR_TOKEN_OFFSET        = 0x14,
 107        HCR_STATUS_OFFSET       = 0x18,
 108
 109        HCR_OPMOD_SHIFT         = 12,
 110        HCR_T_BIT               = 21,
 111        HCR_E_BIT               = 22,
 112        HCR_GO_BIT              = 23
 113};
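
/*
 * Rough sketch of how the HCR is written, pieced together from the offsets
 * above and from mlx4_cmd_post() below (the authoritative layout is defined
 * by the firmware interface, not by this file):
 *
 *	0x00  in_param[63:32]
 *	0x04  in_param[31:0]
 *	0x08  in_modifier
 *	0x0c  out_param[63:32]
 *	0x10  out_param[31:0]
 *	0x14  token << 16
 *	0x18  GO bit, event bit, toggle bit, opcode modifier and opcode;
 *	      on completion the top byte of this word carries the fw status
 *	      (see mlx4_cmd_poll())
 */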
 114
 115enum {
 116        GO_BIT_TIMEOUT_MSECS    = 10000
 117};
 118
 119enum mlx4_vlan_transition {
 120        MLX4_VLAN_TRANSITION_VST_VST = 0,
 121        MLX4_VLAN_TRANSITION_VST_VGT = 1,
 122        MLX4_VLAN_TRANSITION_VGT_VST = 2,
 123        MLX4_VLAN_TRANSITION_VGT_VGT = 3,
 124};
 125
 126
 127struct mlx4_cmd_context {
 128        struct completion       done;
 129        int                     result;
 130        int                     next;
 131        u64                     out_param;
 132        u16                     token;
 133        u8                      fw_status;
 134};
 135
 136static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
 137                                    struct mlx4_vhcr_cmd *in_vhcr);
 138
 139static int mlx4_status_to_errno(u8 status)
 140{
 141        static const int trans_table[] = {
 142                [CMD_STAT_INTERNAL_ERR]   = -EIO,
 143                [CMD_STAT_BAD_OP]         = -EPERM,
 144                [CMD_STAT_BAD_PARAM]      = -EINVAL,
 145                [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
 146                [CMD_STAT_BAD_RESOURCE]   = -EBADF,
 147                [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
 148                [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
 149                [CMD_STAT_BAD_RES_STATE]  = -EBADF,
 150                [CMD_STAT_BAD_INDEX]      = -EBADF,
 151                [CMD_STAT_BAD_NVMEM]      = -EFAULT,
 152                [CMD_STAT_ICM_ERROR]      = -ENFILE,
 153                [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
 154                [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
 155                [CMD_STAT_REG_BOUND]      = -EBUSY,
 156                [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
 157                [CMD_STAT_BAD_PKT]        = -EINVAL,
 158                [CMD_STAT_BAD_SIZE]       = -ENOMEM,
 159                [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
 160        };
 161
 162        if (status >= ARRAY_SIZE(trans_table) ||
 163            (status != CMD_STAT_OK && trans_table[status] == 0))
 164                return -EIO;
 165
 166        return trans_table[status];
 167}
 168
 169static u8 mlx4_errno_to_status(int errno)
 170{
 171        switch (errno) {
 172        case -EPERM:
 173                return CMD_STAT_BAD_OP;
 174        case -EINVAL:
 175                return CMD_STAT_BAD_PARAM;
 176        case -ENXIO:
 177                return CMD_STAT_BAD_SYS_STATE;
 178        case -EBUSY:
 179                return CMD_STAT_RESOURCE_BUSY;
 180        case -ENOMEM:
 181                return CMD_STAT_EXCEED_LIM;
 182        case -ENFILE:
 183                return CMD_STAT_ICM_ERROR;
 184        default:
 185                return CMD_STAT_INTERNAL_ERR;
 186        }
 187}
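
/*
 * mlx4_errno_to_status() is only an approximate inverse of
 * mlx4_status_to_errno(): several statuses share an errno, so a round trip
 * does not always restore the original status.  Illustrative example:
 *
 *	mlx4_status_to_errno(CMD_STAT_BAD_RES_STATE)	-> -EBADF
 *	mlx4_errno_to_status(-EBADF)			-> CMD_STAT_INTERNAL_ERR
 */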
 188
 189static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
 190                                       u8 op_modifier)
 191{
 192        switch (op) {
 193        case MLX4_CMD_UNMAP_ICM:
 194        case MLX4_CMD_UNMAP_ICM_AUX:
 195        case MLX4_CMD_UNMAP_FA:
 196        case MLX4_CMD_2RST_QP:
 197        case MLX4_CMD_HW2SW_EQ:
 198        case MLX4_CMD_HW2SW_CQ:
 199        case MLX4_CMD_HW2SW_SRQ:
 200        case MLX4_CMD_HW2SW_MPT:
 201        case MLX4_CMD_CLOSE_HCA:
 202        case MLX4_QP_FLOW_STEERING_DETACH:
 203        case MLX4_CMD_FREE_RES:
 204        case MLX4_CMD_CLOSE_PORT:
 205                return CMD_STAT_OK;
 206
 207        case MLX4_CMD_QP_ATTACH:
 208                /* In the detach case, return success */
 209                if (op_modifier == 0)
 210                        return CMD_STAT_OK;
 211                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 212
 213        default:
 214                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 215        }
 216}
 217
 218static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
 219{
 220        /* Any error during the closing commands below is considered fatal */
 221        if (op == MLX4_CMD_CLOSE_HCA ||
 222            op == MLX4_CMD_HW2SW_EQ ||
 223            op == MLX4_CMD_HW2SW_CQ ||
 224            op == MLX4_CMD_2RST_QP ||
 225            op == MLX4_CMD_HW2SW_SRQ ||
 226            op == MLX4_CMD_SYNC_TPT ||
 227            op == MLX4_CMD_UNMAP_ICM ||
 228            op == MLX4_CMD_UNMAP_ICM_AUX ||
 229            op == MLX4_CMD_UNMAP_FA)
 230                return 1;
 231        /* An error on MLX4_CMD_HW2SW_MPT is fatal except when the fw status
 232         * equals CMD_STAT_REG_BOUND.  This status indicates that the memory
 233         * region has memory windows bound to it, which may result from invalid
 234         * user space usage and is not fatal.
 235         */
 236        if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
 237                return 1;
 238        return 0;
 239}
 240
 241static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
 242                               int err)
 243{
 244        /* Only if the reset flow is really active is the return code based
 245         * on the command; otherwise the current error code is returned.
 246         */
 247        if (mlx4_internal_err_reset) {
 248                mlx4_enter_error_state(dev->persist);
 249                err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 250        }
 251
 252        return err;
 253}
 254
 255static int comm_pending(struct mlx4_dev *dev)
 256{
 257        struct mlx4_priv *priv = mlx4_priv(dev);
 258        u32 status = readl(&priv->mfunc.comm->slave_read);
 259
 260        return (swab32(status) >> 31) != priv->cmd.comm_toggle;
 261}
 262
 263static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
 264{
 265        struct mlx4_priv *priv = mlx4_priv(dev);
 266        u32 val;
 267
 268        /* To avoid writing to unknown addresses after the device state was
 269         * changed to internal error and the function was reset,
 270         * check the INTERNAL_ERROR flag which is updated under
 271         * device_state_mutex lock.
 272         */
 273        mutex_lock(&dev->persist->device_state_mutex);
 274
 275        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
 276                mutex_unlock(&dev->persist->device_state_mutex);
 277                return -EIO;
 278        }
 279
 280        priv->cmd.comm_toggle ^= 1;
 281        val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
 282        __raw_writel((__force u32) cpu_to_be32(val),
 283                     &priv->mfunc.comm->slave_write);
 284        mutex_unlock(&dev->persist->device_state_mutex);
 285        return 0;
 286}
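
/*
 * Sketch of the 32-bit word posted to the comm channel above (assembled in
 * host order and converted to big-endian on write):
 *
 *	bit  31		toggle, flipped on every post and compared against
 *			priv->cmd.comm_toggle by comm_pending()
 *	bits 23:16	command
 *	bits 15:0	parameter
 *
 * comm_pending() keeps returning true until the toggle bit read back from
 * slave_read matches comm_toggle, which presumably happens once the master
 * has acknowledged the command.
 */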
 287
 288static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
 289                       unsigned long timeout)
 290{
 291        struct mlx4_priv *priv = mlx4_priv(dev);
 292        unsigned long end;
 293        int err = 0;
 294        int ret_from_pending = 0;
 295
 296        /* First, verify that the master reports correct status */
 297        if (comm_pending(dev)) {
 298                mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
 299                          priv->cmd.comm_toggle, cmd);
 300                return -EAGAIN;
 301        }
 302
 303        /* Write command */
 304        down(&priv->cmd.poll_sem);
 305        if (mlx4_comm_cmd_post(dev, cmd, param)) {
 306                /* mlx4_comm_cmd_post returns an error only when the
 307                 * device state is INTERNAL_ERROR
 308                 */
 309                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 310                goto out;
 311        }
 312
 313        end = msecs_to_jiffies(timeout) + jiffies;
 314        while (comm_pending(dev) && time_before(jiffies, end))
 315                cond_resched();
 316        ret_from_pending = comm_pending(dev);
 317        if (ret_from_pending) {
 318                /* Check whether the slave is trying to boot in the middle of
 319                 * an FLR process. The only non-zero result in the RESET
 320                 * command is MLX4_DELAY_RESET_SLAVE. */
 321                if (cmd == MLX4_COMM_CMD_RESET) {
 322                        err = MLX4_DELAY_RESET_SLAVE;
 323                        goto out;
 324                } else {
 325                        mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
 326                                  cmd);
 327                        err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 328                }
 329        }
 330
 331        if (err)
 332                mlx4_enter_error_state(dev->persist);
 333out:
 334        up(&priv->cmd.poll_sem);
 335        return err;
 336}
 337
 338static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
 339                              u16 param, u16 op, unsigned long timeout)
 340{
 341        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 342        struct mlx4_cmd_context *context;
 343        unsigned long end;
 344        int err = 0;
 345
 346        down(&cmd->event_sem);
 347
 348        spin_lock(&cmd->context_lock);
 349        BUG_ON(cmd->free_head < 0);
 350        context = &cmd->context[cmd->free_head];
 351        context->token += cmd->token_mask + 1;
 352        cmd->free_head = context->next;
 353        spin_unlock(&cmd->context_lock);
 354
 355        reinit_completion(&context->done);
 356
 357        if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
 358                /* mlx4_comm_cmd_post returns an error only when the
 359                 * device state is INTERNAL_ERROR
 360                 */
 361                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 362                goto out;
 363        }
 364
 365        if (!wait_for_completion_timeout(&context->done,
 366                                         msecs_to_jiffies(timeout))) {
 367                mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
 368                          vhcr_cmd, op);
 369                goto out_reset;
 370        }
 371
 372        err = context->result;
 373        if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
 374                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 375                         vhcr_cmd, context->fw_status);
 376                if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
 377                        goto out_reset;
 378        }
 379
 380        /* Wait for the comm channel to become ready.
 381         * This is necessary to prevent a race
 382         * when switching between event and polling mode.
 383         * Skip this section if the device is in FATAL_ERROR state;
 384         * in that state, no commands are sent via the comm channel until
 385         * the device has returned from reset.
 386         */
 387        if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
 388                end = msecs_to_jiffies(timeout) + jiffies;
 389                while (comm_pending(dev) && time_before(jiffies, end))
 390                        cond_resched();
 391        }
 392        goto out;
 393
 394out_reset:
 395        err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 396        mlx4_enter_error_state(dev->persist);
 397out:
 398        spin_lock(&cmd->context_lock);
 399        context->next = cmd->free_head;
 400        cmd->free_head = context - cmd->context;
 401        spin_unlock(&cmd->context_lock);
 402
 403        up(&cmd->event_sem);
 404        return err;
 405}
 406
 407int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
 408                  u16 op, unsigned long timeout)
 409{
 410        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 411                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 412
 413        if (mlx4_priv(dev)->cmd.use_events)
 414                return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
 415        return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
 416}
 417
 418static int cmd_pending(struct mlx4_dev *dev)
 419{
 420        u32 status;
 421
 422        if (pci_channel_offline(dev->persist->pdev))
 423                return -EIO;
 424
 425        status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
 426
 427        return (status & swab32(1 << HCR_GO_BIT)) ||
 428                (mlx4_priv(dev)->cmd.toggle ==
 429                 !!(status & swab32(1 << HCR_T_BIT)));
 430}
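
/*
 * cmd_pending() above treats the HCR as busy while the GO bit is still set,
 * or while the toggle bit in the status word equals cmd->toggle (the value
 * the next post would use; mlx4_cmd_post() below flips cmd->toggle after
 * every command it posts).
 */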
 431
 432static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
 433                         u32 in_modifier, u8 op_modifier, u16 op, u16 token,
 434                         int event)
 435{
 436        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 437        u32 __iomem *hcr = cmd->hcr;
 438        int ret = -EIO;
 439        unsigned long end;
 440
 441        mutex_lock(&dev->persist->device_state_mutex);
 442        /* To avoid writing to unknown addresses after the device state was
 443         * changed to internal error and the chip was reset,
 444         * check the INTERNAL_ERROR flag which is updated under
 445         * device_state_mutex lock.
 446         */
 447        if (pci_channel_offline(dev->persist->pdev) ||
 448            (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
 449                /*
 450                 * Device is going through error recovery
 451                 * and cannot accept commands.
 452                 */
 453                goto out;
 454        }
 455
 456        end = jiffies;
 457        if (event)
 458                end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
 459
 460        while (cmd_pending(dev)) {
 461                if (pci_channel_offline(dev->persist->pdev)) {
 462                        /*
 463                         * Device is going through error recovery
 464                         * and cannot accept commands.
 465                         */
 466                        goto out;
 467                }
 468
 469                if (time_after_eq(jiffies, end)) {
 470                        mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
 471                        goto out;
 472                }
 473                cond_resched();
 474        }
 475
 476        /*
 477         * We use writel (instead of something like memcpy_toio)
 478         * because writes of less than 32 bits to the HCR don't work
 479         * (and some architectures such as ia64 implement memcpy_toio
 480         * in terms of writeb).
 481         */
 482        __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
 483        __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
 484        __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
 485        __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
 486        __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
 487        __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);
 488
 489        /* __raw_writel may not order writes. */
 490        wmb();
 491
 492        __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
 493                                               (cmd->toggle << HCR_T_BIT)       |
 494                                               (event ? (1 << HCR_E_BIT) : 0)   |
 495                                               (op_modifier << HCR_OPMOD_SHIFT) |
 496                                               op), hcr + 6);
 497
 498        cmd->toggle = cmd->toggle ^ 1;
 499
 500        ret = 0;
 501
 502out:
 503        if (ret)
 504                mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
 505                          op, ret, in_param, in_modifier, op_modifier);
 506        mutex_unlock(&dev->persist->device_state_mutex);
 507
 508        return ret;
 509}
 510
 511static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 512                          int out_is_imm, u32 in_modifier, u8 op_modifier,
 513                          u16 op, unsigned long timeout)
 514{
 515        struct mlx4_priv *priv = mlx4_priv(dev);
 516        struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
 517        int ret;
 518
 519        mutex_lock(&priv->cmd.slave_cmd_mutex);
 520
 521        vhcr->in_param = cpu_to_be64(in_param);
 522        vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
 523        vhcr->in_modifier = cpu_to_be32(in_modifier);
 524        vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
 525        vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
 526        vhcr->status = 0;
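        /* bit 6 of the VHCR flags indicates whether this function uses
         * event-driven command completions (derived from how the value is
         * built below); the remaining flag bits are defined by the VHCR
         * protocol, not set here
         */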
 527        vhcr->flags = !!(priv->cmd.use_events) << 6;
 528
 529        if (mlx4_is_master(dev)) {
 530                ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
 531                if (!ret) {
 532                        if (out_is_imm) {
 533                                if (out_param)
 534                                        *out_param =
 535                                                be64_to_cpu(vhcr->out_param);
 536                                else {
 537                                        mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 538                                                 op);
 539                                        vhcr->status = CMD_STAT_BAD_PARAM;
 540                                }
 541                        }
 542                        ret = mlx4_status_to_errno(vhcr->status);
 543                }
 544                if (ret &&
 545                    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 546                        ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
 547        } else {
 548                ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
 549                                    MLX4_COMM_TIME + timeout);
 550                if (!ret) {
 551                        if (out_is_imm) {
 552                                if (out_param)
 553                                        *out_param =
 554                                                be64_to_cpu(vhcr->out_param);
 555                                else {
 556                                        mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 557                                                 op);
 558                                        vhcr->status = CMD_STAT_BAD_PARAM;
 559                                }
 560                        }
 561                        ret = mlx4_status_to_errno(vhcr->status);
 562                } else {
 563                        if (dev->persist->state &
 564                            MLX4_DEVICE_STATE_INTERNAL_ERROR)
 565                                ret = mlx4_internal_err_ret_value(dev, op,
 566                                                                  op_modifier);
 567                        else
 568                                mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
 569                }
 570        }
 571
 572        mutex_unlock(&priv->cmd.slave_cmd_mutex);
 573        return ret;
 574}
 575
 576static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 577                         int out_is_imm, u32 in_modifier, u8 op_modifier,
 578                         u16 op, unsigned long timeout)
 579{
 580        struct mlx4_priv *priv = mlx4_priv(dev);
 581        void __iomem *hcr = priv->cmd.hcr;
 582        int err = 0;
 583        unsigned long end;
 584        u32 stat;
 585
 586        down(&priv->cmd.poll_sem);
 587
 588        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
 589                /*
 590                 * Device is going through error recovery
 591                 * and cannot accept commands.
 592                 */
 593                err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 594                goto out;
 595        }
 596
 597        if (out_is_imm && !out_param) {
 598                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 599                         op);
 600                err = -EINVAL;
 601                goto out;
 602        }
 603
 604        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
 605                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
 606        if (err)
 607                goto out_reset;
 608
 609        end = msecs_to_jiffies(timeout) + jiffies;
 610        while (cmd_pending(dev) && time_before(jiffies, end)) {
 611                if (pci_channel_offline(dev->persist->pdev)) {
 612                        /*
 613                         * Device is going through error recovery
 614                         * and cannot accept commands.
 615                         */
 616                        err = -EIO;
 617                        goto out_reset;
 618                }
 619
 620                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
 621                        err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 622                        goto out;
 623                }
 624
 625                cond_resched();
 626        }
 627
 628        if (cmd_pending(dev)) {
 629                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
 630                          op);
 631                err = -EIO;
 632                goto out_reset;
 633        }
 634
 635        if (out_is_imm)
 636                *out_param =
 637                        (u64) be32_to_cpu((__force __be32)
 638                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
 639                        (u64) be32_to_cpu((__force __be32)
 640                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
 641        stat = be32_to_cpu((__force __be32)
 642                           __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
 643        err = mlx4_status_to_errno(stat);
 644        if (err) {
 645                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 646                         op, stat);
 647                if (mlx4_closing_cmd_fatal_error(op, stat))
 648                        goto out_reset;
 649                goto out;
 650        }
 651
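        /* on success (err == 0) control simply falls through the out_reset
         * label below and the reset-flow handling is skipped
         */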
 652out_reset:
 653        if (err)
 654                err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
 655out:
 656        up(&priv->cmd.poll_sem);
 657        return err;
 658}
 659
 660void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
 661{
 662        struct mlx4_priv *priv = mlx4_priv(dev);
 663        struct mlx4_cmd_context *context =
 664                &priv->cmd.context[token & priv->cmd.token_mask];
 665
 666        /* previously timed out command completing at long last */
 667        if (token != context->token)
 668                return;
 669
 670        context->fw_status = status;
 671        context->result    = mlx4_status_to_errno(status);
 672        context->out_param = out_param;
 673
 674        complete(&context->done);
 675}
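
/*
 * mlx4_cmd_wait() below bumps context->token by token_mask + 1 each time a
 * context is reused, so a late completion for a command that already timed
 * out arrives with a stale token and is dropped by the check above instead
 * of completing an unrelated command that happens to reuse the same slot.
 */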
 676
 677static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 678                         int out_is_imm, u32 in_modifier, u8 op_modifier,
 679                         u16 op, unsigned long timeout)
 680{
 681        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 682        struct mlx4_cmd_context *context;
 683        long ret_wait;
 684        int err = 0;
 685
 686        down(&cmd->event_sem);
 687
 688        spin_lock(&cmd->context_lock);
 689        BUG_ON(cmd->free_head < 0);
 690        context = &cmd->context[cmd->free_head];
 691        context->token += cmd->token_mask + 1;
 692        cmd->free_head = context->next;
 693        spin_unlock(&cmd->context_lock);
 694
 695        if (out_is_imm && !out_param) {
 696                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 697                         op);
 698                err = -EINVAL;
 699                goto out;
 700        }
 701
 702        reinit_completion(&context->done);
 703
 704        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
 705                            in_modifier, op_modifier, op, context->token, 1);
 706        if (err)
 707                goto out_reset;
 708
 709        if (op == MLX4_CMD_SENSE_PORT) {
 710                ret_wait =
 711                        wait_for_completion_interruptible_timeout(&context->done,
 712                                                                  msecs_to_jiffies(timeout));
 713                if (ret_wait < 0) {
 714                        context->fw_status = 0;
 715                        context->out_param = 0;
 716                        context->result = 0;
 717                }
 718        } else {
 719                ret_wait = (long)wait_for_completion_timeout(&context->done,
 720                                                             msecs_to_jiffies(timeout));
 721        }
 722        if (!ret_wait) {
 723                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
 724                          op);
 725                if (op == MLX4_CMD_NOP) {
 726                        err = -EBUSY;
 727                        goto out;
 728                } else {
 729                        err = -EIO;
 730                        goto out_reset;
 731                }
 732        }
 733
 734        err = context->result;
 735        if (err) {
 736                /* Since we do not want this error message to always be
 737                 * displayed at driver start when there are ConnectX2 HCAs
 738                 * on the host, we demote the message for this specific
 739                 * command/input_mod/opcode_mod/fw-status combination to debug.
 740                 */
 741                if (op == MLX4_CMD_SET_PORT &&
 742                    (in_modifier == 1 || in_modifier == 2) &&
 743                    op_modifier == MLX4_SET_PORT_IB_OPCODE &&
 744                    context->fw_status == CMD_STAT_BAD_SIZE)
 745                        mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
 746                                 op, context->fw_status);
 747                else
 748                        mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 749                                 op, context->fw_status);
 750                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 751                        err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 752                else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
 753                        goto out_reset;
 754
 755                goto out;
 756        }
 757
 758        if (out_is_imm)
 759                *out_param = context->out_param;
 760
 761out_reset:
 762        if (err)
 763                err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
 764out:
 765        spin_lock(&cmd->context_lock);
 766        context->next = cmd->free_head;
 767        cmd->free_head = context - cmd->context;
 768        spin_unlock(&cmd->context_lock);
 769
 770        up(&cmd->event_sem);
 771        return err;
 772}
 773
 774int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 775               int out_is_imm, u32 in_modifier, u8 op_modifier,
 776               u16 op, unsigned long timeout, int native)
 777{
 778        if (pci_channel_offline(dev->persist->pdev))
 779                return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
 780
 781        if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
 782                int ret;
 783
 784                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 785                        return mlx4_internal_err_ret_value(dev, op,
 786                                                          op_modifier);
 787                down_read(&mlx4_priv(dev)->cmd.switch_sem);
 788                if (mlx4_priv(dev)->cmd.use_events)
 789                        ret = mlx4_cmd_wait(dev, in_param, out_param,
 790                                            out_is_imm, in_modifier,
 791                                            op_modifier, op, timeout);
 792                else
 793                        ret = mlx4_cmd_poll(dev, in_param, out_param,
 794                                            out_is_imm, in_modifier,
 795                                            op_modifier, op, timeout);
 796
 797                up_read(&mlx4_priv(dev)->cmd.switch_sem);
 798                return ret;
 799        }
 800        return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
 801                              in_modifier, op_modifier, op, timeout);
 802}
 803EXPORT_SYMBOL_GPL(__mlx4_cmd);
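
/*
 * Callers normally go through the wrappers from <linux/mlx4/cmd.h> rather
 * than __mlx4_cmd() itself.  A rough sketch of the three flavors, with the
 * argument order as used by the callers in this file:
 *
 *	err = mlx4_cmd(dev, in_param, in_modifier, op_modifier, op,
 *		       timeout, native);			no output
 *	err = mlx4_cmd_box(dev, inbox_dma, outbox_dma, in_modifier,
 *			   op_modifier, op, timeout, native);	mailbox output
 *	err = mlx4_cmd_imm(dev, in_param, &out_param, in_modifier,
 *			   op_modifier, op, timeout, native);	immediate output
 *
 * See mlx4_ARM_COMM_CHANNEL(), query_pkey_block() and mlx4_ACCESS_MEM()
 * below for concrete uses.
 */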
 804
 805
 806int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
 807{
 808        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
 809                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 810}
 811
 812static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
 813                           int slave, u64 slave_addr,
 814                           int size, int is_read)
 815{
 816        u64 in_param;
 817        u64 out_param;
 818
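        /* The checks below require 4KB-aligned addresses (so the slave or
         * function id can be OR-ed into the low address bits), a slave id
         * that fits in 7 bits, and a size that is a multiple of 256 bytes.
         */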
 819        if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
 820            (slave & ~0x7f) | (size & 0xff)) {
 821                mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
 822                         slave_addr, master_addr, slave, size);
 823                return -EINVAL;
 824        }
 825
 826        if (is_read) {
 827                in_param = (u64) slave | slave_addr;
 828                out_param = (u64) dev->caps.function | master_addr;
 829        } else {
 830                in_param = (u64) dev->caps.function | master_addr;
 831                out_param = (u64) slave | slave_addr;
 832        }
 833
 834        return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
 835                            MLX4_CMD_ACCESS_MEM,
 836                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 837}
 838
 839static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
 840                               struct mlx4_cmd_mailbox *inbox,
 841                               struct mlx4_cmd_mailbox *outbox)
 842{
 843        struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
 844        struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
 845        int err;
 846        int i;
 847
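        /* The P_Key table MAD attribute returns 32 entries per block, so the
         * index must be 32-aligned; attr_mod selects which block to fetch.
         */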
 848        if (index & 0x1f)
 849                return -EINVAL;
 850
 851        in_mad->attr_mod = cpu_to_be32(index / 32);
 852
 853        err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
 854                           MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
 855                           MLX4_CMD_NATIVE);
 856        if (err)
 857                return err;
 858
 859        for (i = 0; i < 32; ++i)
 860                pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
 861
 862        return err;
 863}
 864
 865static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
 866                               struct mlx4_cmd_mailbox *inbox,
 867                               struct mlx4_cmd_mailbox *outbox)
 868{
 869        int i;
 870        int err;
 871
 872        for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
 873                err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
 874                if (err)
 875                        return err;
 876        }
 877
 878        return 0;
 879}
 880#define PORT_CAPABILITY_LOCATION_IN_SMP 20
 881#define PORT_STATE_OFFSET 32
 882
 883static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
 884{
 885        if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
 886                return IB_PORT_ACTIVE;
 887        else
 888                return IB_PORT_DOWN;
 889}
 890
 891static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
 892                                struct mlx4_vhcr *vhcr,
 893                                struct mlx4_cmd_mailbox *inbox,
 894                                struct mlx4_cmd_mailbox *outbox,
 895                                struct mlx4_cmd_info *cmd)
 896{
 897        struct ib_smp *smp = inbox->buf;
 898        u32 index;
 899        u8 port, slave_port;
 900        u8 opcode_modifier;
 901        u16 *table;
 902        int err;
 903        int vidx, pidx;
 904        int network_view;
 905        struct mlx4_priv *priv = mlx4_priv(dev);
 906        struct ib_smp *outsmp = outbox->buf;
 907        __be16 *outtab = (__be16 *)(outsmp->data);
 908        __be32 slave_cap_mask;
 909        __be64 slave_node_guid;
 910
 911        slave_port = vhcr->in_modifier;
 912        port = mlx4_slave_convert_port(dev, slave, slave_port);
 913
 914        /* network-view bit is for driver use only, and should not be passed to FW */
 915        opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
 916        network_view = !!(vhcr->op_modifier & 0x8);
 917
 918        if (smp->base_version == 1 &&
 919            smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
 920            smp->class_version == 1) {
 921                /* host view is paravirtualized */
 922                if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
 923                        if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
 924                                index = be32_to_cpu(smp->attr_mod);
 925                                if (port < 1 || port > dev->caps.num_ports)
 926                                        return -EINVAL;
 927                                table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
 928                                                sizeof(*table) * 32, GFP_KERNEL);
 929
 930                                if (!table)
 931                                        return -ENOMEM;
 932                                /* need to get the full pkey table because the paravirtualized
 933                                 * pkeys may be scattered among several pkey blocks.
 934                                 */
 935                                err = get_full_pkey_table(dev, port, table, inbox, outbox);
 936                                if (!err) {
 937                                        for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
 938                                                pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
 939                                                outtab[vidx % 32] = cpu_to_be16(table[pidx]);
 940                                        }
 941                                }
 942                                kfree(table);
 943                                return err;
 944                        }
 945                        if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
 946                                /* get the slave specific caps: */
 947                                /* do the command */
 948                                smp->attr_mod = cpu_to_be32(port);
 949                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
 950                                            port, opcode_modifier,
 951                                            vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 952                                /* modify the response for slaves */
 953                                if (!err && slave != mlx4_master_func_num(dev)) {
 954                                        u8 *state = outsmp->data + PORT_STATE_OFFSET;
 955
 956                                        *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
 957                                        slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
 958                                        memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
 959                                }
 960                                return err;
 961                        }
 962                        if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
 963                                __be64 guid = mlx4_get_admin_guid(dev, slave,
 964                                                                  port);
 965
 966                                /* set the PF admin guid to the FW/HW burned
 967                                 * GUID, if it wasn't yet set
 968                                 */
 969                                if (slave == 0 && guid == 0) {
 970                                        smp->attr_mod = 0;
 971                                        err = mlx4_cmd_box(dev,
 972                                                           inbox->dma,
 973                                                           outbox->dma,
 974                                                           vhcr->in_modifier,
 975                                                           opcode_modifier,
 976                                                           vhcr->op,
 977                                                           MLX4_CMD_TIME_CLASS_C,
 978                                                           MLX4_CMD_NATIVE);
 979                                        if (err)
 980                                                return err;
 981                                        mlx4_set_admin_guid(dev,
 982                                                            *(__be64 *)outsmp->
 983                                                            data, slave, port);
 984                                } else {
 985                                        memcpy(outsmp->data, &guid, 8);
 986                                }
 987
 988                                /* clean all other gids */
 989                                memset(outsmp->data + 8, 0, 56);
 990                                return 0;
 991                        }
 992                        if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
 993                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
 994                                             port, opcode_modifier,
 995                                             vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 996                                if (!err) {
 997                                        slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
 998                                        memcpy(outsmp->data + 12, &slave_node_guid, 8);
 999                                }
1000                                return err;
1001                        }
1002                }
1003        }
1004
1005        /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
1006         * These are the MADs used by ib verbs (such as ib_query_gids).
1007         */
1008        if (slave != mlx4_master_func_num(dev) &&
1009            !mlx4_vf_smi_enabled(dev, slave, port)) {
1010                if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
1011                      smp->method == IB_MGMT_METHOD_GET) || network_view) {
1012                        mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
1013                                 slave, smp->mgmt_class, smp->method,
1014                                 network_view ? "Network" : "Host",
1015                                 be16_to_cpu(smp->attr_id));
1016                        return -EPERM;
1017                }
1018        }
1019
1020        return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
1021                                    vhcr->in_modifier, opcode_modifier,
1022                                    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1023}
1024
1025static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
1026                     struct mlx4_vhcr *vhcr,
1027                     struct mlx4_cmd_mailbox *inbox,
1028                     struct mlx4_cmd_mailbox *outbox,
1029                     struct mlx4_cmd_info *cmd)
1030{
1031        return -EPERM;
1032}
1033
1034int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
1035                     struct mlx4_vhcr *vhcr,
1036                     struct mlx4_cmd_mailbox *inbox,
1037                     struct mlx4_cmd_mailbox *outbox,
1038                     struct mlx4_cmd_info *cmd)
1039{
1040        u64 in_param;
1041        u64 out_param;
1042        int err;
1043
1044        in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1045        out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
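        /* For commands flagged with encode_slave_id, the slave id is stuffed
         * into the low byte of in_param; this assumes the mailbox DMA address
         * has its low byte clear (cf. INBOX_MASK above).
         */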
1046        if (cmd->encode_slave_id) {
1047                in_param &= 0xffffffffffffff00ll;
1048                in_param |= slave;
1049        }
1050
1051        err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1052                         vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1053                         MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1054
1055        if (cmd->out_is_imm)
1056                vhcr->out_param = out_param;
1057
1058        return err;
1059}
1060
1061static struct mlx4_cmd_info cmd_info[] = {
1062        {
1063                .opcode = MLX4_CMD_QUERY_FW,
1064                .has_inbox = false,
1065                .has_outbox = true,
1066                .out_is_imm = false,
1067                .encode_slave_id = false,
1068                .verify = NULL,
1069                .wrapper = mlx4_QUERY_FW_wrapper
1070        },
1071        {
1072                .opcode = MLX4_CMD_QUERY_HCA,
1073                .has_inbox = false,
1074                .has_outbox = true,
1075                .out_is_imm = false,
1076                .encode_slave_id = false,
1077                .verify = NULL,
1078                .wrapper = NULL
1079        },
1080        {
1081                .opcode = MLX4_CMD_QUERY_DEV_CAP,
1082                .has_inbox = false,
1083                .has_outbox = true,
1084                .out_is_imm = false,
1085                .encode_slave_id = false,
1086                .verify = NULL,
1087                .wrapper = mlx4_QUERY_DEV_CAP_wrapper
1088        },
1089        {
1090                .opcode = MLX4_CMD_QUERY_FUNC_CAP,
1091                .has_inbox = false,
1092                .has_outbox = true,
1093                .out_is_imm = false,
1094                .encode_slave_id = false,
1095                .verify = NULL,
1096                .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1097        },
1098        {
1099                .opcode = MLX4_CMD_QUERY_ADAPTER,
1100                .has_inbox = false,
1101                .has_outbox = true,
1102                .out_is_imm = false,
1103                .encode_slave_id = false,
1104                .verify = NULL,
1105                .wrapper = NULL
1106        },
1107        {
1108                .opcode = MLX4_CMD_INIT_PORT,
1109                .has_inbox = false,
1110                .has_outbox = false,
1111                .out_is_imm = false,
1112                .encode_slave_id = false,
1113                .verify = NULL,
1114                .wrapper = mlx4_INIT_PORT_wrapper
1115        },
1116        {
1117                .opcode = MLX4_CMD_CLOSE_PORT,
1118                .has_inbox = false,
1119                .has_outbox = false,
1120                .out_is_imm  = false,
1121                .encode_slave_id = false,
1122                .verify = NULL,
1123                .wrapper = mlx4_CLOSE_PORT_wrapper
1124        },
1125        {
1126                .opcode = MLX4_CMD_QUERY_PORT,
1127                .has_inbox = false,
1128                .has_outbox = true,
1129                .out_is_imm = false,
1130                .encode_slave_id = false,
1131                .verify = NULL,
1132                .wrapper = mlx4_QUERY_PORT_wrapper
1133        },
1134        {
1135                .opcode = MLX4_CMD_SET_PORT,
1136                .has_inbox = true,
1137                .has_outbox = false,
1138                .out_is_imm = false,
1139                .encode_slave_id = false,
1140                .verify = NULL,
1141                .wrapper = mlx4_SET_PORT_wrapper
1142        },
1143        {
1144                .opcode = MLX4_CMD_MAP_EQ,
1145                .has_inbox = false,
1146                .has_outbox = false,
1147                .out_is_imm = false,
1148                .encode_slave_id = false,
1149                .verify = NULL,
1150                .wrapper = mlx4_MAP_EQ_wrapper
1151        },
1152        {
1153                .opcode = MLX4_CMD_SW2HW_EQ,
1154                .has_inbox = true,
1155                .has_outbox = false,
1156                .out_is_imm = false,
1157                .encode_slave_id = true,
1158                .verify = NULL,
1159                .wrapper = mlx4_SW2HW_EQ_wrapper
1160        },
1161        {
1162                .opcode = MLX4_CMD_HW_HEALTH_CHECK,
1163                .has_inbox = false,
1164                .has_outbox = false,
1165                .out_is_imm = false,
1166                .encode_slave_id = false,
1167                .verify = NULL,
1168                .wrapper = NULL
1169        },
1170        {
1171                .opcode = MLX4_CMD_NOP,
1172                .has_inbox = false,
1173                .has_outbox = false,
1174                .out_is_imm = false,
1175                .encode_slave_id = false,
1176                .verify = NULL,
1177                .wrapper = NULL
1178        },
1179        {
1180                .opcode = MLX4_CMD_CONFIG_DEV,
1181                .has_inbox = false,
1182                .has_outbox = true,
1183                .out_is_imm = false,
1184                .encode_slave_id = false,
1185                .verify = NULL,
1186                .wrapper = mlx4_CONFIG_DEV_wrapper
1187        },
1188        {
1189                .opcode = MLX4_CMD_ALLOC_RES,
1190                .has_inbox = false,
1191                .has_outbox = false,
1192                .out_is_imm = true,
1193                .encode_slave_id = false,
1194                .verify = NULL,
1195                .wrapper = mlx4_ALLOC_RES_wrapper
1196        },
1197        {
1198                .opcode = MLX4_CMD_FREE_RES,
1199                .has_inbox = false,
1200                .has_outbox = false,
1201                .out_is_imm = false,
1202                .encode_slave_id = false,
1203                .verify = NULL,
1204                .wrapper = mlx4_FREE_RES_wrapper
1205        },
1206        {
1207                .opcode = MLX4_CMD_SW2HW_MPT,
1208                .has_inbox = true,
1209                .has_outbox = false,
1210                .out_is_imm = false,
1211                .encode_slave_id = true,
1212                .verify = NULL,
1213                .wrapper = mlx4_SW2HW_MPT_wrapper
1214        },
1215        {
1216                .opcode = MLX4_CMD_QUERY_MPT,
1217                .has_inbox = false,
1218                .has_outbox = true,
1219                .out_is_imm = false,
1220                .encode_slave_id = false,
1221                .verify = NULL,
1222                .wrapper = mlx4_QUERY_MPT_wrapper
1223        },
1224        {
1225                .opcode = MLX4_CMD_HW2SW_MPT,
1226                .has_inbox = false,
1227                .has_outbox = false,
1228                .out_is_imm = false,
1229                .encode_slave_id = false,
1230                .verify = NULL,
1231                .wrapper = mlx4_HW2SW_MPT_wrapper
1232        },
1233        {
1234                .opcode = MLX4_CMD_READ_MTT,
1235                .has_inbox = false,
1236                .has_outbox = true,
1237                .out_is_imm = false,
1238                .encode_slave_id = false,
1239                .verify = NULL,
1240                .wrapper = NULL
1241        },
1242        {
1243                .opcode = MLX4_CMD_WRITE_MTT,
1244                .has_inbox = true,
1245                .has_outbox = false,
1246                .out_is_imm = false,
1247                .encode_slave_id = false,
1248                .verify = NULL,
1249                .wrapper = mlx4_WRITE_MTT_wrapper
1250        },
1251        {
1252                .opcode = MLX4_CMD_SYNC_TPT,
1253                .has_inbox = true,
1254                .has_outbox = false,
1255                .out_is_imm = false,
1256                .encode_slave_id = false,
1257                .verify = NULL,
1258                .wrapper = NULL
1259        },
1260        {
1261                .opcode = MLX4_CMD_HW2SW_EQ,
1262                .has_inbox = false,
1263                .has_outbox = false,
1264                .out_is_imm = false,
1265                .encode_slave_id = true,
1266                .verify = NULL,
1267                .wrapper = mlx4_HW2SW_EQ_wrapper
1268        },
1269        {
1270                .opcode = MLX4_CMD_QUERY_EQ,
1271                .has_inbox = false,
1272                .has_outbox = true,
1273                .out_is_imm = false,
1274                .encode_slave_id = true,
1275                .verify = NULL,
1276                .wrapper = mlx4_QUERY_EQ_wrapper
1277        },
1278        {
1279                .opcode = MLX4_CMD_SW2HW_CQ,
1280                .has_inbox = true,
1281                .has_outbox = false,
1282                .out_is_imm = false,
1283                .encode_slave_id = true,
1284                .verify = NULL,
1285                .wrapper = mlx4_SW2HW_CQ_wrapper
1286        },
1287        {
1288                .opcode = MLX4_CMD_HW2SW_CQ,
1289                .has_inbox = false,
1290                .has_outbox = false,
1291                .out_is_imm = false,
1292                .encode_slave_id = false,
1293                .verify = NULL,
1294                .wrapper = mlx4_HW2SW_CQ_wrapper
1295        },
1296        {
1297                .opcode = MLX4_CMD_QUERY_CQ,
1298                .has_inbox = false,
1299                .has_outbox = true,
1300                .out_is_imm = false,
1301                .encode_slave_id = false,
1302                .verify = NULL,
1303                .wrapper = mlx4_QUERY_CQ_wrapper
1304        },
1305        {
1306                .opcode = MLX4_CMD_MODIFY_CQ,
1307                .has_inbox = true,
1308                .has_outbox = false,
1309                .out_is_imm = true,
1310                .encode_slave_id = false,
1311                .verify = NULL,
1312                .wrapper = mlx4_MODIFY_CQ_wrapper
1313        },
1314        {
1315                .opcode = MLX4_CMD_SW2HW_SRQ,
1316                .has_inbox = true,
1317                .has_outbox = false,
1318                .out_is_imm = false,
1319                .encode_slave_id = true,
1320                .verify = NULL,
1321                .wrapper = mlx4_SW2HW_SRQ_wrapper
1322        },
1323        {
1324                .opcode = MLX4_CMD_HW2SW_SRQ,
1325                .has_inbox = false,
1326                .has_outbox = false,
1327                .out_is_imm = false,
1328                .encode_slave_id = false,
1329                .verify = NULL,
1330                .wrapper = mlx4_HW2SW_SRQ_wrapper
1331        },
1332        {
1333                .opcode = MLX4_CMD_QUERY_SRQ,
1334                .has_inbox = false,
1335                .has_outbox = true,
1336                .out_is_imm = false,
1337                .encode_slave_id = false,
1338                .verify = NULL,
1339                .wrapper = mlx4_QUERY_SRQ_wrapper
1340        },
1341        {
1342                .opcode = MLX4_CMD_ARM_SRQ,
1343                .has_inbox = false,
1344                .has_outbox = false,
1345                .out_is_imm = false,
1346                .encode_slave_id = false,
1347                .verify = NULL,
1348                .wrapper = mlx4_ARM_SRQ_wrapper
1349        },
1350        {
1351                .opcode = MLX4_CMD_RST2INIT_QP,
1352                .has_inbox = true,
1353                .has_outbox = false,
1354                .out_is_imm = false,
1355                .encode_slave_id = true,
1356                .verify = NULL,
1357                .wrapper = mlx4_RST2INIT_QP_wrapper
1358        },
1359        {
1360                .opcode = MLX4_CMD_INIT2INIT_QP,
1361                .has_inbox = true,
1362                .has_outbox = false,
1363                .out_is_imm = false,
1364                .encode_slave_id = false,
1365                .verify = NULL,
1366                .wrapper = mlx4_INIT2INIT_QP_wrapper
1367        },
1368        {
1369                .opcode = MLX4_CMD_INIT2RTR_QP,
1370                .has_inbox = true,
1371                .has_outbox = false,
1372                .out_is_imm = false,
1373                .encode_slave_id = false,
1374                .verify = NULL,
1375                .wrapper = mlx4_INIT2RTR_QP_wrapper
1376        },
1377        {
1378                .opcode = MLX4_CMD_RTR2RTS_QP,
1379                .has_inbox = true,
1380                .has_outbox = false,
1381                .out_is_imm = false,
1382                .encode_slave_id = false,
1383                .verify = NULL,
1384                .wrapper = mlx4_RTR2RTS_QP_wrapper
1385        },
1386        {
1387                .opcode = MLX4_CMD_RTS2RTS_QP,
1388                .has_inbox = true,
1389                .has_outbox = false,
1390                .out_is_imm = false,
1391                .encode_slave_id = false,
1392                .verify = NULL,
1393                .wrapper = mlx4_RTS2RTS_QP_wrapper
1394        },
1395        {
1396                .opcode = MLX4_CMD_SQERR2RTS_QP,
1397                .has_inbox = true,
1398                .has_outbox = false,
1399                .out_is_imm = false,
1400                .encode_slave_id = false,
1401                .verify = NULL,
1402                .wrapper = mlx4_SQERR2RTS_QP_wrapper
1403        },
1404        {
1405                .opcode = MLX4_CMD_2ERR_QP,
1406                .has_inbox = false,
1407                .has_outbox = false,
1408                .out_is_imm = false,
1409                .encode_slave_id = false,
1410                .verify = NULL,
1411                .wrapper = mlx4_GEN_QP_wrapper
1412        },
1413        {
1414                .opcode = MLX4_CMD_RTS2SQD_QP,
1415                .has_inbox = false,
1416                .has_outbox = false,
1417                .out_is_imm = false,
1418                .encode_slave_id = false,
1419                .verify = NULL,
1420                .wrapper = mlx4_GEN_QP_wrapper
1421        },
1422        {
1423                .opcode = MLX4_CMD_SQD2SQD_QP,
1424                .has_inbox = true,
1425                .has_outbox = false,
1426                .out_is_imm = false,
1427                .encode_slave_id = false,
1428                .verify = NULL,
1429                .wrapper = mlx4_SQD2SQD_QP_wrapper
1430        },
1431        {
1432                .opcode = MLX4_CMD_SQD2RTS_QP,
1433                .has_inbox = true,
1434                .has_outbox = false,
1435                .out_is_imm = false,
1436                .encode_slave_id = false,
1437                .verify = NULL,
1438                .wrapper = mlx4_SQD2RTS_QP_wrapper
1439        },
1440        {
1441                .opcode = MLX4_CMD_2RST_QP,
1442                .has_inbox = false,
1443                .has_outbox = false,
1444                .out_is_imm = false,
1445                .encode_slave_id = false,
1446                .verify = NULL,
1447                .wrapper = mlx4_2RST_QP_wrapper
1448        },
1449        {
1450                .opcode = MLX4_CMD_QUERY_QP,
1451                .has_inbox = false,
1452                .has_outbox = true,
1453                .out_is_imm = false,
1454                .encode_slave_id = false,
1455                .verify = NULL,
1456                .wrapper = mlx4_GEN_QP_wrapper
1457        },
1458        {
1459                .opcode = MLX4_CMD_SUSPEND_QP,
1460                .has_inbox = false,
1461                .has_outbox = false,
1462                .out_is_imm = false,
1463                .encode_slave_id = false,
1464                .verify = NULL,
1465                .wrapper = mlx4_GEN_QP_wrapper
1466        },
1467        {
1468                .opcode = MLX4_CMD_UNSUSPEND_QP,
1469                .has_inbox = false,
1470                .has_outbox = false,
1471                .out_is_imm = false,
1472                .encode_slave_id = false,
1473                .verify = NULL,
1474                .wrapper = mlx4_GEN_QP_wrapper
1475        },
1476        {
1477                .opcode = MLX4_CMD_UPDATE_QP,
1478                .has_inbox = true,
1479                .has_outbox = false,
1480                .out_is_imm = false,
1481                .encode_slave_id = false,
1482                .verify = NULL,
1483                .wrapper = mlx4_UPDATE_QP_wrapper
1484        },
1485        {
1486                .opcode = MLX4_CMD_GET_OP_REQ,
1487                .has_inbox = false,
1488                .has_outbox = false,
1489                .out_is_imm = false,
1490                .encode_slave_id = false,
1491                .verify = NULL,
1492                .wrapper = mlx4_CMD_EPERM_wrapper,
1493        },
1494        {
1495                .opcode = MLX4_CMD_ALLOCATE_VPP,
1496                .has_inbox = false,
1497                .has_outbox = true,
1498                .out_is_imm = false,
1499                .encode_slave_id = false,
1500                .verify = NULL,
1501                .wrapper = mlx4_CMD_EPERM_wrapper,
1502        },
1503        {
1504                .opcode = MLX4_CMD_SET_VPORT_QOS,
1505                .has_inbox = false,
1506                .has_outbox = true,
1507                .out_is_imm = false,
1508                .encode_slave_id = false,
1509                .verify = NULL,
1510                .wrapper = mlx4_CMD_EPERM_wrapper,
1511        },
1512        {
1513                .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1514                .has_inbox = false,
1515                .has_outbox = false,
1516                .out_is_imm = false,
1517                .encode_slave_id = false,
1518                .verify = NULL, /* XXX verify: only demux can do this */
1519                .wrapper = NULL
1520        },
1521        {
1522                .opcode = MLX4_CMD_MAD_IFC,
1523                .has_inbox = true,
1524                .has_outbox = true,
1525                .out_is_imm = false,
1526                .encode_slave_id = false,
1527                .verify = NULL,
1528                .wrapper = mlx4_MAD_IFC_wrapper
1529        },
1530        {
1531                .opcode = MLX4_CMD_MAD_DEMUX,
1532                .has_inbox = false,
1533                .has_outbox = false,
1534                .out_is_imm = false,
1535                .encode_slave_id = false,
1536                .verify = NULL,
1537                .wrapper = mlx4_CMD_EPERM_wrapper
1538        },
1539        {
1540                .opcode = MLX4_CMD_QUERY_IF_STAT,
1541                .has_inbox = false,
1542                .has_outbox = true,
1543                .out_is_imm = false,
1544                .encode_slave_id = false,
1545                .verify = NULL,
1546                .wrapper = mlx4_QUERY_IF_STAT_wrapper
1547        },
1548        {
1549                .opcode = MLX4_CMD_ACCESS_REG,
1550                .has_inbox = true,
1551                .has_outbox = true,
1552                .out_is_imm = false,
1553                .encode_slave_id = false,
1554                .verify = NULL,
1555                .wrapper = mlx4_ACCESS_REG_wrapper,
1556        },
1557        {
1558                .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
1559                .has_inbox = false,
1560                .has_outbox = false,
1561                .out_is_imm = false,
1562                .encode_slave_id = false,
1563                .verify = NULL,
1564                .wrapper = mlx4_CMD_EPERM_wrapper,
1565        },
1566        /* Native multicast commands are not available for guests */
1567        {
1568                .opcode = MLX4_CMD_QP_ATTACH,
1569                .has_inbox = true,
1570                .has_outbox = false,
1571                .out_is_imm = false,
1572                .encode_slave_id = false,
1573                .verify = NULL,
1574                .wrapper = mlx4_QP_ATTACH_wrapper
1575        },
1576        {
1577                .opcode = MLX4_CMD_PROMISC,
1578                .has_inbox = false,
1579                .has_outbox = false,
1580                .out_is_imm = false,
1581                .encode_slave_id = false,
1582                .verify = NULL,
1583                .wrapper = mlx4_PROMISC_wrapper
1584        },
1585        /* Ethernet specific commands */
1586        {
1587                .opcode = MLX4_CMD_SET_VLAN_FLTR,
1588                .has_inbox = true,
1589                .has_outbox = false,
1590                .out_is_imm = false,
1591                .encode_slave_id = false,
1592                .verify = NULL,
1593                .wrapper = mlx4_SET_VLAN_FLTR_wrapper
1594        },
1595        {
1596                .opcode = MLX4_CMD_SET_MCAST_FLTR,
1597                .has_inbox = false,
1598                .has_outbox = false,
1599                .out_is_imm = false,
1600                .encode_slave_id = false,
1601                .verify = NULL,
1602                .wrapper = mlx4_SET_MCAST_FLTR_wrapper
1603        },
1604        {
1605                .opcode = MLX4_CMD_DUMP_ETH_STATS,
1606                .has_inbox = false,
1607                .has_outbox = true,
1608                .out_is_imm = false,
1609                .encode_slave_id = false,
1610                .verify = NULL,
1611                .wrapper = mlx4_DUMP_ETH_STATS_wrapper
1612        },
1613        {
1614                .opcode = MLX4_CMD_INFORM_FLR_DONE,
1615                .has_inbox = false,
1616                .has_outbox = false,
1617                .out_is_imm = false,
1618                .encode_slave_id = false,
1619                .verify = NULL,
1620                .wrapper = NULL
1621        },
1622        /* flow steering commands */
1623        {
1624                .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1625                .has_inbox = true,
1626                .has_outbox = false,
1627                .out_is_imm = true,
1628                .encode_slave_id = false,
1629                .verify = NULL,
1630                .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1631        },
1632        {
1633                .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1634                .has_inbox = false,
1635                .has_outbox = false,
1636                .out_is_imm = false,
1637                .encode_slave_id = false,
1638                .verify = NULL,
1639                .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1640        },
1641        {
1642                .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1643                .has_inbox = false,
1644                .has_outbox = false,
1645                .out_is_imm = false,
1646                .encode_slave_id = false,
1647                .verify = NULL,
1648                .wrapper = mlx4_CMD_EPERM_wrapper
1649        },
1650        {
1651                .opcode = MLX4_CMD_VIRT_PORT_MAP,
1652                .has_inbox = false,
1653                .has_outbox = false,
1654                .out_is_imm = false,
1655                .encode_slave_id = false,
1656                .verify = NULL,
1657                .wrapper = mlx4_CMD_EPERM_wrapper
1658        },
1659};
1660
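    /*
     * Process one command posted by a slave through its virtual HCR (vHCR):
     * DMA the vHCR in from the slave (unless an in-memory copy was supplied),
     * look the opcode up in cmd_info[], pull in the inbox, run the optional
     * verify hook, execute the command through its wrapper or directly via
     * __mlx4_cmd(), then DMA the outbox and completion status back to the
     * slave, generating a command-completion EQE if the slave requested one.
     */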
1661static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1662                                    struct mlx4_vhcr_cmd *in_vhcr)
1663{
1664        struct mlx4_priv *priv = mlx4_priv(dev);
1665        struct mlx4_cmd_info *cmd = NULL;
1666        struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1667        struct mlx4_vhcr *vhcr;
1668        struct mlx4_cmd_mailbox *inbox = NULL;
1669        struct mlx4_cmd_mailbox *outbox = NULL;
1670        u64 in_param;
1671        u64 out_param;
1672        int ret = 0;
1673        int i;
1674        int err = 0;
1675
1676        /* Create sw representation of Virtual HCR */
1677        vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1678        if (!vhcr)
1679                return -ENOMEM;
1680
1681        /* DMA in the vHCR */
1682        if (!in_vhcr) {
1683                ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1684                                      priv->mfunc.master.slave_state[slave].vhcr_dma,
1685                                      ALIGN(sizeof(struct mlx4_vhcr_cmd),
1686                                            MLX4_ACCESS_MEM_ALIGN), 1);
1687                if (ret) {
1688                        if (!(dev->persist->state &
1689                            MLX4_DEVICE_STATE_INTERNAL_ERROR))
1690                                mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1691                                         __func__, ret);
1692                        kfree(vhcr);
1693                        return ret;
1694                }
1695        }
1696
1697        /* Fill SW VHCR fields */
1698        vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1699        vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1700        vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1701        vhcr->token = be16_to_cpu(vhcr_cmd->token);
1702        vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1703        vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1704        vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1705
1706        /* Lookup command */
1707        for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1708                if (vhcr->op == cmd_info[i].opcode) {
1709                        cmd = &cmd_info[i];
1710                        break;
1711                }
1712        }
1713        if (!cmd) {
1714                mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1715                         vhcr->op, slave);
1716                vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1717                goto out_status;
1718        }
1719
1720        /* Read inbox */
1721        if (cmd->has_inbox) {
1722                vhcr->in_param &= INBOX_MASK;
1723                inbox = mlx4_alloc_cmd_mailbox(dev);
1724                if (IS_ERR(inbox)) {
1725                        vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1726                        inbox = NULL;
1727                        goto out_status;
1728                }
1729
1730                ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1731                                      vhcr->in_param,
1732                                      MLX4_MAILBOX_SIZE, 1);
1733                if (ret) {
1734                        if (!(dev->persist->state &
1735                            MLX4_DEVICE_STATE_INTERNAL_ERROR))
1736                                mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1737                                         __func__, cmd->opcode);
1738                        vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1739                        goto out_status;
1740                }
1741        }
1742
1743        /* Apply permission and bound checks if applicable */
1744        if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1745                mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1746                          vhcr->op, slave, vhcr->in_modifier);
1747                vhcr_cmd->status = CMD_STAT_BAD_OP;
1748                goto out_status;
1749        }
1750
1751        /* Allocate outbox */
1752        if (cmd->has_outbox) {
1753                outbox = mlx4_alloc_cmd_mailbox(dev);
1754                if (IS_ERR(outbox)) {
1755                        vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1756                        outbox = NULL;
1757                        goto out_status;
1758                }
1759        }
1760
1761        /* Execute the command! */
1762        if (cmd->wrapper) {
1763                err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1764                                   cmd);
1765                if (cmd->out_is_imm)
1766                        vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1767        } else {
1768                in_param = cmd->has_inbox ? (u64) inbox->dma :
1769                        vhcr->in_param;
1770                out_param = cmd->has_outbox ? (u64) outbox->dma :
1771                        vhcr->out_param;
1772                err = __mlx4_cmd(dev, in_param, &out_param,
1773                                 cmd->out_is_imm, vhcr->in_modifier,
1774                                 vhcr->op_modifier, vhcr->op,
1775                                 MLX4_CMD_TIME_CLASS_A,
1776                                 MLX4_CMD_NATIVE);
1777
1778                if (cmd->out_is_imm) {
1779                        vhcr->out_param = out_param;
1780                        vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1781                }
1782        }
1783
1784        if (err) {
1785                if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
1786                        if (vhcr->op == MLX4_CMD_ALLOC_RES &&
1787                            (vhcr->in_modifier & 0xff) == RES_COUNTER &&
1788                            err == -EDQUOT)
1789                                mlx4_dbg(dev,
1790                                         "Unable to allocate counter for slave %d (%d)\n",
1791                                         slave, err);
1792                        else
1793                                mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1794                                          vhcr->op, slave, vhcr->errno, err);
1795                }
1796                vhcr_cmd->status = mlx4_errno_to_status(err);
1797                goto out_status;
1798        }
1799
1800
1801        /* Write outbox if command completed successfully */
1802        if (cmd->has_outbox && !vhcr_cmd->status) {
1803                ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1804                                      vhcr->out_param,
1805                                      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1806                if (ret) {
1807                        /* If we failed to write back the outbox after the
1808                         * command was successfully executed, we must fail this
1809                         * slave, as it is now in an undefined state */
1810                        if (!(dev->persist->state &
1811                            MLX4_DEVICE_STATE_INTERNAL_ERROR))
1812                                mlx4_err(dev, "%s: Failed writing outbox\n", __func__);
1813                        goto out;
1814                }
1815        }
1816
1817out_status:
1818        /* DMA back vhcr result */
1819        if (!in_vhcr) {
1820                ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1821                                      priv->mfunc.master.slave_state[slave].vhcr_dma,
1822                                      ALIGN(sizeof(struct mlx4_vhcr),
1823                                            MLX4_ACCESS_MEM_ALIGN),
1824                                      MLX4_CMD_WRAPPED);
1825                if (ret)
1826                        mlx4_err(dev, "%s: Failed writing vhcr result\n",
1827                                 __func__);
1828                else if (vhcr->e_bit &&
1829                         mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1830                                mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1831                                          slave);
1832        }
1833
1834out:
1835        kfree(vhcr);
1836        mlx4_free_cmd_mailbox(dev, inbox);
1837        mlx4_free_cmd_mailbox(dev, outbox);
1838        return ret;
1839}
1840
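    /*
     * Apply a changed VF admin VLAN/QoS/link-state configuration immediately:
     * register the new default VLAN if one is needed and queue a work item
     * that updates the slave's QPs via UPDATE_QP.  If UPDATE_QP is not
     * supported, only the link state is copied over and -1 is returned.
     */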
1841static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1842                                            int slave, int port)
1843{
1844        struct mlx4_vport_oper_state *vp_oper;
1845        struct mlx4_vport_state *vp_admin;
1846        struct mlx4_vf_immed_vlan_work *work;
1847        struct mlx4_dev *dev = &(priv->dev);
1848        int err;
1849        int admin_vlan_ix = NO_INDX;
1850
1851        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1852        vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1853
1854        if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1855            vp_oper->state.default_qos == vp_admin->default_qos &&
1856            vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
1857            vp_oper->state.link_state == vp_admin->link_state &&
1858            vp_oper->state.qos_vport == vp_admin->qos_vport)
1859                return 0;
1860
1861        if (!(priv->mfunc.master.slave_state[slave].active &&
1862              dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
1863                /* even if the UPDATE_QP command isn't supported, we still want
1864                 * to set this VF link according to the admin directive
1865                 */
1866                vp_oper->state.link_state = vp_admin->link_state;
1867                return -1;
1868        }
1869
1870        mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1871                 slave, port);
1872        mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1873                 vp_admin->default_vlan, vp_admin->default_qos,
1874                 vp_admin->link_state);
1875
1876        work = kzalloc(sizeof(*work), GFP_KERNEL);
1877        if (!work)
1878                return -ENOMEM;
1879
1880        if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1881                if (MLX4_VGT != vp_admin->default_vlan) {
1882                        err = __mlx4_register_vlan(&priv->dev, port,
1883                                                   vp_admin->default_vlan,
1884                                                   &admin_vlan_ix);
1885                        if (err) {
1886                                kfree(work);
1887                                mlx4_warn(&priv->dev,
1888                                          "No vlan resources slave %d, port %d\n",
1889                                          slave, port);
1890                                return err;
1891                        }
1892                } else {
1893                        admin_vlan_ix = NO_INDX;
1894                }
1895                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1896                mlx4_dbg(&priv->dev,
1897                         "alloc vlan %d idx  %d slave %d port %d\n",
1898                         (int)(vp_admin->default_vlan),
1899                         admin_vlan_ix, slave, port);
1900        }
1901
1902        /* save original vlan ix and vlan id */
1903        work->orig_vlan_id = vp_oper->state.default_vlan;
1904        work->orig_vlan_ix = vp_oper->vlan_idx;
1905
1906        /* handle new qos */
1907        if (vp_oper->state.default_qos != vp_admin->default_qos)
1908                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1909
1910        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1911                vp_oper->vlan_idx = admin_vlan_ix;
1912
1913        vp_oper->state.default_vlan = vp_admin->default_vlan;
1914        vp_oper->state.default_qos = vp_admin->default_qos;
1915        vp_oper->state.vlan_proto = vp_admin->vlan_proto;
1916        vp_oper->state.link_state = vp_admin->link_state;
1917        vp_oper->state.qos_vport = vp_admin->qos_vport;
1918
1919        if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1920                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
1921
1922        /* iterate over QPs owned by this slave, using UPDATE_QP */
1923        work->port = port;
1924        work->slave = slave;
1925        work->qos = vp_oper->state.default_qos;
1926        work->qos_vport = vp_oper->state.qos_vport;
1927        work->vlan_id = vp_oper->state.default_vlan;
1928        work->vlan_ix = vp_oper->vlan_idx;
1929        work->vlan_proto = vp_oper->state.vlan_proto;
1930        work->priv = priv;
1931        INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1932        queue_work(priv->mfunc.master.comm_wq, &work->work);
1933
1934        return 0;
1935}
1936
1937static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
1938{
1939        struct mlx4_qos_manager *port_qos_ctl;
1940        struct mlx4_priv *priv = mlx4_priv(dev);
1941
1942        port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
1943        bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
1944
1945        /* Enable only default prio at PF init routine */
1946        set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
1947}
1948
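    /*
     * Spread the port's available virtual port priorities (VPPs) evenly
     * across the priorities enabled in the port's QoS bitmap and remember
     * how many QoS VFs the port can serve.
     */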
1949static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
1950{
1951        int i;
1952        int err;
1953        int num_vfs;
1954        u16 available_vpp;
1955        u8 vpp_param[MLX4_NUM_UP];
1956        struct mlx4_qos_manager *port_qos;
1957        struct mlx4_priv *priv = mlx4_priv(dev);
1958
1959        err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1960        if (err) {
1961                mlx4_info(dev, "Failed to query available VPPs\n");
1962                return;
1963        }
1964
1965        port_qos = &priv->mfunc.master.qos_ctl[port];
1966        num_vfs = (available_vpp /
1967                   bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
1968
1969        for (i = 0; i < MLX4_NUM_UP; i++) {
1970                if (test_bit(i, port_qos->priority_bm))
1971                        vpp_param[i] = num_vfs;
1972        }
1973
1974        err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
1975        if (err) {
1976                mlx4_info(dev, "Failed to allocate VPPs\n");
1977                return;
1978        }
1979
1980        /* Query actual allocated VPP, just to make sure */
1981        err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1982        if (err) {
1983                mlx4_info(dev, "Failed to query available VPPs\n");
1984                return;
1985        }
1986
1987        port_qos->num_of_qos_vfs = num_vfs;
1988        mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp);
1989
1990        for (i = 0; i < MLX4_NUM_UP; i++)
1991                mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
1992                         vpp_param[i]);
1993}
1994
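    /*
     * Copy the admin (requested) vport state into the operational state for
     * every active port of the slave, registering the VLAN and MAC resources
     * needed to enforce it.
     */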
1995static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1996{
1997        int p, port, err;
1998        struct mlx4_vport_state *vp_admin;
1999        struct mlx4_vport_oper_state *vp_oper;
2000        struct mlx4_slave_state *slave_state =
2001                &priv->mfunc.master.slave_state[slave];
2002        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2003                        &priv->dev, slave);
2004
2005        for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
2006                port = p + 1;
2007                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2008                        priv->mfunc.master.vf_admin[slave].enable_smi[port];
2009                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2010                vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2011                if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
2012                    slave_state->vst_qinq_supported) {
2013                        vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
2014                        vp_oper->state.default_vlan = vp_admin->default_vlan;
2015                        vp_oper->state.default_qos  = vp_admin->default_qos;
2016                }
2017                vp_oper->state.link_state = vp_admin->link_state;
2018                vp_oper->state.mac        = vp_admin->mac;
2019                vp_oper->state.spoofchk   = vp_admin->spoofchk;
2020                vp_oper->state.tx_rate    = vp_admin->tx_rate;
2021                vp_oper->state.qos_vport  = vp_admin->qos_vport;
2022                vp_oper->state.guid       = vp_admin->guid;
2023
2024                if (MLX4_VGT != vp_admin->default_vlan) {
2025                        err = __mlx4_register_vlan(&priv->dev, port,
2026                                                   vp_admin->default_vlan, &(vp_oper->vlan_idx));
2027                        if (err) {
2028                                vp_oper->vlan_idx = NO_INDX;
2029                                vp_oper->state.default_vlan = MLX4_VGT;
2030                                vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
2031                                mlx4_warn(&priv->dev,
2032                                          "No vlan resources slave %d, port %d\n",
2033                                          slave, port);
2034                                return err;
2035                        }
2036                        mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
2037                                 (int)(vp_oper->state.default_vlan),
2038                                 vp_oper->vlan_idx, slave, port);
2039                }
2040                if (vp_admin->spoofchk) {
2041                        vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
2042                                                               port,
2043                                                               vp_admin->mac);
2044                        if (0 > vp_oper->mac_idx) {
2045                                err = vp_oper->mac_idx;
2046                                vp_oper->mac_idx = NO_INDX;
2047                                mlx4_warn(&priv->dev,
2048                                          "No mac resources slave %d, port %d\n",
2049                                          slave, port);
2050                                return err;
2051                        }
2052                        mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
2053                                 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
2054                }
2055        }
2056        return 0;
2057}
2058
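    /*
     * Undo mlx4_master_activate_admin_state(): disable SMI for the slave and
     * release any VLAN and MAC that were registered on its vports.
     */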
2059static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
2060{
2061        int p, port;
2062        struct mlx4_vport_oper_state *vp_oper;
2063        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2064                        &priv->dev, slave);
2065
2066        for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
2067                port = p + 1;
2068                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2069                        MLX4_VF_SMI_DISABLED;
2070                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2071                if (NO_INDX != vp_oper->vlan_idx) {
2072                        __mlx4_unregister_vlan(&priv->dev,
2073                                               port, vp_oper->state.default_vlan);
2074                        vp_oper->vlan_idx = NO_INDX;
2075                }
2076                if (NO_INDX != vp_oper->mac_idx) {
2077                        __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
2078                        vp_oper->mac_idx = NO_INDX;
2079                }
2080        }
2081        return;
2082}
2083
2084static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
2085                               u16 param, u8 toggle)
2086{
2087        struct mlx4_priv *priv = mlx4_priv(dev);
2088        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2089        u32 reply;
2090        u8 is_going_down = 0;
2091        int i;
2092        unsigned long flags;
2093
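            /*
             * Every command from the slave flips the comm-channel toggle bit;
             * a toggle that does not match what the master expects means the
             * channel is out of sync and the slave must be reset.
             */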
2094        slave_state[slave].comm_toggle ^= 1;
2095        reply = (u32) slave_state[slave].comm_toggle << 31;
2096        if (toggle != slave_state[slave].comm_toggle) {
2097                mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
2098                          toggle, slave);
2099                goto reset_slave;
2100        }
2101        if (cmd == MLX4_COMM_CMD_RESET) {
2102                mlx4_warn(dev, "Received reset from slave:%d\n", slave);
2103                slave_state[slave].active = false;
2104                slave_state[slave].old_vlan_api = false;
2105                slave_state[slave].vst_qinq_supported = false;
2106                mlx4_master_deactivate_admin_state(priv, slave);
2107                for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
2108                        slave_state[slave].event_eq[i].eqn = -1;
2109                        slave_state[slave].event_eq[i].token = 0;
2110                }
2111                /* Check if we are in the middle of the FLR process;
2112                 * if so, return "retry" status to the slave */
2113                if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
2114                        goto inform_slave_state;
2115
2116                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
2117
2118                /* write the version in the event field */
2119                reply |= mlx4_comm_get_version();
2120
2121                goto reset_slave;
2122        }
2123        /* Command from slave in the middle of FLR */
2124        if (cmd != MLX4_COMM_CMD_RESET &&
2125            MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
2126                mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
2127                          slave, cmd);
2128                return;
2129        }
2130
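            /*
             * The slave hands over its 64-bit vHCR DMA address in 16-bit
             * chunks: VHCR0 carries bits 63:48, VHCR1 bits 47:32, VHCR2
             * bits 31:16 and VHCR_EN the low 16 bits.  Each step is accepted
             * only if the previous one was the last command seen from the
             * slave.
             */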
2131        switch (cmd) {
2132        case MLX4_COMM_CMD_VHCR0:
2133                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
2134                        goto reset_slave;
2135                slave_state[slave].vhcr_dma = ((u64) param) << 48;
2136                priv->mfunc.master.slave_state[slave].cookie = 0;
2137                break;
2138        case MLX4_COMM_CMD_VHCR1:
2139                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
2140                        goto reset_slave;
2141                slave_state[slave].vhcr_dma |= ((u64) param) << 32;
2142                break;
2143        case MLX4_COMM_CMD_VHCR2:
2144                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
2145                        goto reset_slave;
2146                slave_state[slave].vhcr_dma |= ((u64) param) << 16;
2147                break;
2148        case MLX4_COMM_CMD_VHCR_EN:
2149                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
2150                        goto reset_slave;
2151                slave_state[slave].vhcr_dma |= param;
2152                if (mlx4_master_activate_admin_state(priv, slave))
2153                        goto reset_slave;
2154                slave_state[slave].active = true;
2155                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
2156                break;
2157        case MLX4_COMM_CMD_VHCR_POST:
2158                if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
2159                    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2160                        mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2161                                  slave, cmd, slave_state[slave].last_cmd);
2162                        goto reset_slave;
2163                }
2164
2165                mutex_lock(&priv->cmd.slave_cmd_mutex);
2166                if (mlx4_master_process_vhcr(dev, slave, NULL)) {
2167                        mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
2168                                 slave);
2169                        mutex_unlock(&priv->cmd.slave_cmd_mutex);
2170                        goto reset_slave;
2171                }
2172                mutex_unlock(&priv->cmd.slave_cmd_mutex);
2173                break;
2174        default:
2175                mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
2176                goto reset_slave;
2177        }
2178        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2179        if (!slave_state[slave].is_slave_going_down)
2180                slave_state[slave].last_cmd = cmd;
2181        else
2182                is_going_down = 1;
2183        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2184        if (is_going_down) {
2185                mlx4_warn(dev, "Slave is going down, aborting command(%d) executing from slave:%d\n",
2186                          cmd, slave);
2187                return;
2188        }
2189        __raw_writel((__force u32) cpu_to_be32(reply),
2190                     &priv->mfunc.comm[slave].slave_read);
2191
2192        return;
2193
2194reset_slave:
2195        /* cleanup any slave resources */
2196        if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2197                mlx4_delete_all_resources_for_slave(dev, slave);
2198
2199        if (cmd != MLX4_COMM_CMD_RESET) {
2200                mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2201                          slave, cmd);
2202                /* Turn on internal error so the slave resets itself immediately;
2203                 * otherwise it might take until the command timeout expires.
2204                 */
2205                reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2206        }
2207
2208        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2209        if (!slave_state[slave].is_slave_going_down)
2210                slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2211        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2212        /* With the slave in the middle of FLR, no need to clean resources again. */
2213inform_slave_state:
2214        memset(&slave_state[slave].event_eq, 0,
2215               sizeof(struct mlx4_slave_event_eq_info));
2216        __raw_writel((__force u32) cpu_to_be32(reply),
2217                     &priv->mfunc.comm[slave].slave_read);
2218        wmb();
2219}
2220
2221/* Master command processing: scan armed slaves round-robin and dispatch their comm-channel commands */
2222void mlx4_master_comm_channel(struct work_struct *work)
2223{
2224        struct mlx4_mfunc_master_ctx *master =
2225                container_of(work,
2226                             struct mlx4_mfunc_master_ctx,
2227                             comm_work);
2228        struct mlx4_mfunc *mfunc =
2229                container_of(master, struct mlx4_mfunc, master);
2230        struct mlx4_priv *priv =
2231                container_of(mfunc, struct mlx4_priv, mfunc);
2232        struct mlx4_dev *dev = &priv->dev;
2233        u32 lbit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
2234        u32 nmbr_bits;
2235        u32 comm_cmd;
2236        int i, slave;
2237        int toggle;
2238        bool first = true;
2239        int served = 0;
2240        int reported = 0;
2241        u32 slt;
2242
2243        for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++)
2244                lbit_vec[i] = be32_to_cpu(master->comm_arm_bit_vector[i]);
2245        nmbr_bits = dev->persist->num_vfs + 1;
2246        if (++master->next_slave >= nmbr_bits)
2247                master->next_slave = 0;
2248        slave = master->next_slave;
2249        while (true) {
2250                slave = find_next_bit((const unsigned long *)&lbit_vec, nmbr_bits, slave);
2251                if  (!first && slave >= master->next_slave)
2252                        break;
2253                if (slave == nmbr_bits) {
2254                        if (!first)
2255                                break;
2256                        first = false;
2257                        slave = 0;
2258                        continue;
2259                }
2260                ++reported;
2261                comm_cmd = swab32(readl(&mfunc->comm[slave].slave_write));
2262                slt = swab32(readl(&mfunc->comm[slave].slave_read)) >> 31;
2263                toggle = comm_cmd >> 31;
2264                if (toggle != slt) {
2265                        if (master->slave_state[slave].comm_toggle
2266                            != slt) {
2267                                pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resyncing.\n",
2268                                        slave, slt,
2269                                        master->slave_state[slave].comm_toggle);
2270                                master->slave_state[slave].comm_toggle =
2271                                        slt;
2272                        }
2273                        mlx4_master_do_cmd(dev, slave,
2274                                           comm_cmd >> 16 & 0xff,
2275                                           comm_cmd & 0xffff, toggle);
2276                        ++served;
2277                }
2278                slave++;
2279        }
2280
2281        if (reported && reported != served)
2282                mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
2283                          reported, served);
2284
2285        if (mlx4_ARM_COMM_CHANNEL(dev))
2286                mlx4_warn(dev, "Failed to arm comm channel events\n");
2287}
2288
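    /*
     * Bring the comm-channel toggle bit back in sync: poll until the toggle
     * read back in slave_read matches the one in slave_write, and if that
     * never happens (e.g. a previous VM left the channel in a bad state)
     * reset both words to zero.
     */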
2289static int sync_toggles(struct mlx4_dev *dev)
2290{
2291        struct mlx4_priv *priv = mlx4_priv(dev);
2292        u32 wr_toggle;
2293        u32 rd_toggle;
2294        unsigned long end;
2295
2296        wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
2297        if (wr_toggle == 0xffffffff)
2298                end = jiffies + msecs_to_jiffies(30000);
2299        else
2300                end = jiffies + msecs_to_jiffies(5000);
2301
2302        while (time_before(jiffies, end)) {
2303                rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2304                if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2305                        /* PCI might be offline */
2306
2307                        /* If device removal has been requested,
2308                         * do not continue retrying.
2309                         */
2310                        if (dev->persist->interface_state &
2311                            MLX4_INTERFACE_STATE_NOWAIT) {
2312                                mlx4_warn(dev,
2313                                          "communication channel is offline\n");
2314                                return -EIO;
2315                        }
2316
2317                        msleep(100);
2318                        wr_toggle = swab32(readl(&priv->mfunc.comm->
2319                                           slave_write));
2320                        continue;
2321                }
2322
2323                if (rd_toggle >> 31 == wr_toggle >> 31) {
2324                        priv->cmd.comm_toggle = rd_toggle >> 31;
2325                        return 0;
2326                }
2327
2328                cond_resched();
2329        }
2330
2331        /*
2332         * We could reach here if, for example, the previous VM using this
2333         * function misbehaved and left the channel in an unsynced state.
2334         * Fix that here and give this VM a chance to use a properly
2335         * synced channel.
2336         */
2337        mlx4_warn(dev, "recovering from a previously misbehaved VM\n");
2338        __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2339        __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2340        priv->cmd.comm_toggle = 0;
2341
2342        return 0;
2343}
2344
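    /*
     * Map the communication channel and, on the master, allocate the
     * per-slave state (slave_state, vf_admin, vf_oper), initialize the
     * per-port vport defaults and start the comm-channel workqueue; a slave
     * only needs to synchronize the channel toggles.
     */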
2345int mlx4_multi_func_init(struct mlx4_dev *dev)
2346{
2347        struct mlx4_priv *priv = mlx4_priv(dev);
2348        struct mlx4_slave_state *s_state;
2349        int i, j, err, port;
2350
2351        if (mlx4_is_master(dev))
2352                priv->mfunc.comm =
2353                ioremap(pci_resource_start(dev->persist->pdev,
2354                                           priv->fw.comm_bar) +
2355                        priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2356        else
2357                priv->mfunc.comm =
2358                ioremap(pci_resource_start(dev->persist->pdev, 2) +
2359                        MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2360        if (!priv->mfunc.comm) {
2361                mlx4_err(dev, "Couldn't map communication vector\n");
2362                goto err_vhcr;
2363        }
2364
2365        if (mlx4_is_master(dev)) {
2366                struct mlx4_vf_oper_state *vf_oper;
2367                struct mlx4_vf_admin_state *vf_admin;
2368
2369                priv->mfunc.master.slave_state =
2370                        kcalloc(dev->num_slaves,
2371                                sizeof(struct mlx4_slave_state),
2372                                GFP_KERNEL);
2373                if (!priv->mfunc.master.slave_state)
2374                        goto err_comm;
2375
2376                priv->mfunc.master.vf_admin =
2377                        kcalloc(dev->num_slaves,
2378                                sizeof(struct mlx4_vf_admin_state),
2379                                GFP_KERNEL);
2380                if (!priv->mfunc.master.vf_admin)
2381                        goto err_comm_admin;
2382
2383                priv->mfunc.master.vf_oper =
2384                        kcalloc(dev->num_slaves,
2385                                sizeof(struct mlx4_vf_oper_state),
2386                                GFP_KERNEL);
2387                if (!priv->mfunc.master.vf_oper)
2388                        goto err_comm_oper;
2389
2390                priv->mfunc.master.next_slave = 0;
2391
2392                for (i = 0; i < dev->num_slaves; ++i) {
2393                        vf_admin = &priv->mfunc.master.vf_admin[i];
2394                        vf_oper = &priv->mfunc.master.vf_oper[i];
2395                        s_state = &priv->mfunc.master.slave_state[i];
2396                        s_state->last_cmd = MLX4_COMM_CMD_RESET;
2397                        s_state->vst_qinq_supported = false;
2398                        mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2399                        for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2400                                s_state->event_eq[j].eqn = -1;
2401                        __raw_writel((__force u32) 0,
2402                                     &priv->mfunc.comm[i].slave_write);
2403                        __raw_writel((__force u32) 0,
2404                                     &priv->mfunc.comm[i].slave_read);
2405                        for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2406                                struct mlx4_vport_state *admin_vport;
2407                                struct mlx4_vport_state *oper_vport;
2408
2409                                s_state->vlan_filter[port] =
2410                                        kzalloc(sizeof(struct mlx4_vlan_fltr),
2411                                                GFP_KERNEL);
2412                                if (!s_state->vlan_filter[port]) {
2413                                        if (--port)
2414                                                kfree(s_state->vlan_filter[port]);
2415                                        goto err_slaves;
2416                                }
2417
2418                                admin_vport = &vf_admin->vport[port];
2419                                oper_vport = &vf_oper->vport[port].state;
2420                                INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2421                                admin_vport->default_vlan = MLX4_VGT;
2422                                oper_vport->default_vlan = MLX4_VGT;
2423                                admin_vport->qos_vport =
2424                                                MLX4_VPP_DEFAULT_VPORT;
2425                                oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
2426                                admin_vport->vlan_proto = htons(ETH_P_8021Q);
2427                                oper_vport->vlan_proto = htons(ETH_P_8021Q);
2428                                vf_oper->vport[port].vlan_idx = NO_INDX;
2429                                vf_oper->vport[port].mac_idx = NO_INDX;
2430                                mlx4_set_random_admin_guid(dev, i, port);
2431                        }
2432                        spin_lock_init(&s_state->lock);
2433                }
2434
2435                if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
2436                        for (port = 1; port <= dev->caps.num_ports; port++) {
2437                                if (mlx4_is_eth(dev, port)) {
2438                                        mlx4_set_default_port_qos(dev, port);
2439                                        mlx4_allocate_port_vpps(dev, port);
2440                                }
2441                        }
2442                }
2443
2444                memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
2445                priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2446                INIT_WORK(&priv->mfunc.master.comm_work,
2447                          mlx4_master_comm_channel);
2448                INIT_WORK(&priv->mfunc.master.slave_event_work,
2449                          mlx4_gen_slave_eqe);
2450                INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2451                          mlx4_master_handle_slave_flr);
2452                spin_lock_init(&priv->mfunc.master.slave_state_lock);
2453                spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2454                priv->mfunc.master.comm_wq =
2455                        create_singlethread_workqueue("mlx4_comm");
2456                if (!priv->mfunc.master.comm_wq)
2457                        goto err_slaves;
2458
2459                if (mlx4_init_resource_tracker(dev))
2460                        goto err_thread;
2461
2462        } else {
2463                err = sync_toggles(dev);
2464                if (err) {
2465                        mlx4_err(dev, "Couldn't sync toggles\n");
2466                        goto err_comm;
2467                }
2468        }
2469        return 0;
2470
2471err_thread:
2472        destroy_workqueue(priv->mfunc.master.comm_wq);
2473err_slaves:
2474        while (i--) {
2475                for (port = 1; port <= MLX4_MAX_PORTS; port++)
2476                        kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2477        }
2478        kfree(priv->mfunc.master.vf_oper);
2479err_comm_oper:
2480        kfree(priv->mfunc.master.vf_admin);
2481err_comm_admin:
2482        kfree(priv->mfunc.master.slave_state);
2483err_comm:
2484        iounmap(priv->mfunc.comm);
2485        priv->mfunc.comm = NULL;
2486err_vhcr:
2487        dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2488                          priv->mfunc.vhcr,
2489                          priv->mfunc.vhcr_dma);
2490        priv->mfunc.vhcr = NULL;
2491        return -ENOMEM;
2492}
2493
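    /*
     * Set up the command interface: initialize the bookkeeping structures,
     * map the HCR registers on the PF, allocate the vHCR page when running
     * multi-function and create the DMA pool used for command mailboxes.
     * Whatever was set up here is torn down by mlx4_cmd_cleanup() on error.
     */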
2494int mlx4_cmd_init(struct mlx4_dev *dev)
2495{
2496        struct mlx4_priv *priv = mlx4_priv(dev);
2497        int flags = 0;
2498
2499        if (!priv->cmd.initialized) {
2500                init_rwsem(&priv->cmd.switch_sem);
2501                mutex_init(&priv->cmd.slave_cmd_mutex);
2502                sema_init(&priv->cmd.poll_sem, 1);
2503                priv->cmd.use_events = 0;
2504                priv->cmd.toggle     = 1;
2505                priv->cmd.initialized = 1;
2506                flags |= MLX4_CMD_CLEANUP_STRUCT;
2507        }
2508
2509        if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2510                priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2511                                        0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2512                if (!priv->cmd.hcr) {
2513                        mlx4_err(dev, "Couldn't map command register\n");
2514                        goto err;
2515                }
2516                flags |= MLX4_CMD_CLEANUP_HCR;
2517        }
2518
2519        if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2520                priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2521                                                      PAGE_SIZE,
2522                                                      &priv->mfunc.vhcr_dma,
2523                                                      GFP_KERNEL);
2524                if (!priv->mfunc.vhcr)
2525                        goto err;
2526
2527                flags |= MLX4_CMD_CLEANUP_VHCR;
2528        }
2529
2530        if (!priv->cmd.pool) {
2531                priv->cmd.pool = dma_pool_create("mlx4_cmd",
2532                                                 &dev->persist->pdev->dev,
2533                                                 MLX4_MAILBOX_SIZE,
2534                                                 MLX4_MAILBOX_SIZE, 0);
2535                if (!priv->cmd.pool)
2536                        goto err;
2537
2538                flags |= MLX4_CMD_CLEANUP_POOL;
2539        }
2540
2541        return 0;
2542
2543err:
2544        mlx4_cmd_cleanup(dev, flags);
2545        return -ENOMEM;
2546}
2547
2548void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2549{
2550        struct mlx4_priv *priv = mlx4_priv(dev);
2551        int slave;
2552        u32 slave_read;
2553
2554        /* If the comm channel has not yet been initialized,
2555         * skip reporting the internal error event to all
2556         * the communication channels.
2557         */
2558        if (!priv->mfunc.comm)
2559                return;
2560
2561        /* Report an internal error event to all
2562         * communication channels.
2563         */
2564        for (slave = 0; slave < dev->num_slaves; slave++) {
2565                slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2566                slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2567                __raw_writel((__force u32)cpu_to_be32(slave_read),
2568                             &priv->mfunc.comm[slave].slave_read);
2569        }
2570}
2571
2572void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2573{
2574        struct mlx4_priv *priv = mlx4_priv(dev);
2575        int i, port;
2576
2577        if (mlx4_is_master(dev)) {
2578                destroy_workqueue(priv->mfunc.master.comm_wq);
2579                for (i = 0; i < dev->num_slaves; i++) {
2580                        for (port = 1; port <= MLX4_MAX_PORTS; port++)
2581                                kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2582                }
2583                kfree(priv->mfunc.master.slave_state);
2584                kfree(priv->mfunc.master.vf_admin);
2585                kfree(priv->mfunc.master.vf_oper);
2586                dev->num_slaves = 0;
2587        }
2588
2589        iounmap(priv->mfunc.comm);
2590        priv->mfunc.comm = NULL;
2591}
2592
2593void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2594{
2595        struct mlx4_priv *priv = mlx4_priv(dev);
2596
2597        if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2598                dma_pool_destroy(priv->cmd.pool);
2599                priv->cmd.pool = NULL;
2600        }
2601
2602        if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2603            (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
2604                iounmap(priv->cmd.hcr);
2605                priv->cmd.hcr = NULL;
2606        }
2607        if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2608            (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2609                dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2610                                  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2611                priv->mfunc.vhcr = NULL;
2612        }
2613        if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2614                priv->cmd.initialized = 0;
2615}
2616
2617/*
2618 * Switch to using events to issue FW commands (can only be called
2619 * after event queue for command events has been initialized).
2620 */
2621int mlx4_cmd_use_events(struct mlx4_dev *dev)
2622{
2623        struct mlx4_priv *priv = mlx4_priv(dev);
2624        int i;
2625        int err = 0;
2626
2627        priv->cmd.context = kmalloc_array(priv->cmd.max_cmds,
2628                                          sizeof(struct mlx4_cmd_context),
2629                                          GFP_KERNEL);
2630        if (!priv->cmd.context)
2631                return -ENOMEM;
2632
2633        if (mlx4_is_mfunc(dev))
2634                mutex_lock(&priv->cmd.slave_cmd_mutex);
2635        down_write(&priv->cmd.switch_sem);
2636        for (i = 0; i < priv->cmd.max_cmds; ++i) {
2637                priv->cmd.context[i].token = i;
2638                priv->cmd.context[i].next  = i + 1;
2639                /* To support fatal error flow, initialize all
2640                 * cmd contexts to allow simulating completions
2641                 * with complete() at any time.
2642                 */
2643                init_completion(&priv->cmd.context[i].done);
2644        }
2645
2646        priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2647        priv->cmd.free_head = 0;
2648
2649        sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2650
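        /* Compute token_mask as the smallest power of two that is >= max_cmds,
         * minus one.  For example (illustrative), max_cmds == 10 yields
         * token_mask == 0xf.
         */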
2651        for (priv->cmd.token_mask = 1;
2652             priv->cmd.token_mask < priv->cmd.max_cmds;
2653             priv->cmd.token_mask <<= 1)
2654                ; /* nothing */
2655        --priv->cmd.token_mask;
2656
2657        down(&priv->cmd.poll_sem);
2658        priv->cmd.use_events = 1;
2659        up_write(&priv->cmd.switch_sem);
2660        if (mlx4_is_mfunc(dev))
2661                mutex_unlock(&priv->cmd.slave_cmd_mutex);
2662
2663        return err;
2664}
2665
2666/*
2667 * Switch back to polling (used when shutting down the device)
2668 */
2669void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2670{
2671        struct mlx4_priv *priv = mlx4_priv(dev);
2672        int i;
2673
2674        if (mlx4_is_mfunc(dev))
2675                mutex_lock(&priv->cmd.slave_cmd_mutex);
2676        down_write(&priv->cmd.switch_sem);
2677        priv->cmd.use_events = 0;
2678
2679        for (i = 0; i < priv->cmd.max_cmds; ++i)
2680                down(&priv->cmd.event_sem);
2681
2682        kfree(priv->cmd.context);
2683        priv->cmd.context = NULL;
2684
2685        up(&priv->cmd.poll_sem);
2686        up_write(&priv->cmd.switch_sem);
2687        if (mlx4_is_mfunc(dev))
2688                mutex_unlock(&priv->cmd.slave_cmd_mutex);
2689}
2690
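/* Mailbox helpers.  A sketch of the usual calling pattern (illustrative only,
 * mirroring mlx4_get_counter_stats() below; in_mod and op stand in for the
 * caller's input modifier and opcode):
 *
 *	struct mlx4_cmd_mailbox *mailbox = mlx4_alloc_cmd_mailbox(dev);
 *
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0, op,
 *			   MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 *	...
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */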
2691struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2692{
2693        struct mlx4_cmd_mailbox *mailbox;
2694
2695        mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
2696        if (!mailbox)
2697                return ERR_PTR(-ENOMEM);
2698
2699        mailbox->buf = dma_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2700                                       &mailbox->dma);
2701        if (!mailbox->buf) {
2702                kfree(mailbox);
2703                return ERR_PTR(-ENOMEM);
2704        }
2705
2706        return mailbox;
2707}
2708EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2709
2710void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2711                           struct mlx4_cmd_mailbox *mailbox)
2712{
2713        if (!mailbox)
2714                return;
2715
2716        dma_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2717        kfree(mailbox);
2718}
2719EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2720
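/* The comm-channel version word packs the interface revision into bits 15:8
 * and the channel version into bits 7:0; with CMD_CHAN_IF_REV == 1 and
 * CMD_CHAN_VER == 1 this evaluates to 0x0101 (illustrative).
 */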
2721u32 mlx4_comm_get_version(void)
2722{
2723        return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2724}
2725
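/* VF <-> slave index mapping: slave 0 is the PF, so VF n corresponds to
 * slave n + 1 (e.g., illustratively, vf 0 maps to slave 1 and back).
 */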
2726static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2727{
2728        if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2729                mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2730                         vf, dev->persist->num_vfs);
2731                return -EINVAL;
2732        }
2733
2734        return vf+1;
2735}
2736
2737int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2738{
2739        if (slave < 1 || slave > dev->persist->num_vfs) {
2740                mlx4_err(dev,
2741                         "Bad slave number:%d (number of activated slaves: %lu)\n",
2742                         slave, dev->num_slaves);
2743                return -EINVAL;
2744        }
2745        return slave - 1;
2746}
2747
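/* Fatal-error flow: force-complete every event-mode command context with an
 * internal-error status so that waiters blocked in the command path can
 * return.  This relies on all contexts having had their completions
 * initialized in mlx4_cmd_use_events() above.
 */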
2748void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2749{
2750        struct mlx4_priv *priv = mlx4_priv(dev);
2751        struct mlx4_cmd_context *context;
2752        int i;
2753
2754        spin_lock(&priv->cmd.context_lock);
2755        if (priv->cmd.context) {
2756                for (i = 0; i < priv->cmd.max_cmds; ++i) {
2757                        context = &priv->cmd.context[i];
2758                        context->fw_status = CMD_STAT_INTERNAL_ERR;
2759                        context->result    =
2760                                mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2761                        complete(&context->done);
2762                }
2763        }
2764        spin_unlock(&priv->cmd.context_lock);
2765}
2766
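/* Return the bitmap of physical ports assigned to @slave.  Slave 0 (the PF)
 * gets all ports; a VF gets n_ports consecutive ports starting at its
 * min_port, as recorded in dev->dev_vfs[].
 */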
2767struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2768{
2769        struct mlx4_active_ports actv_ports;
2770        int vf;
2771
2772        bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2773
2774        if (slave == 0) {
2775                bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2776                return actv_ports;
2777        }
2778
2779        vf = mlx4_get_vf_indx(dev, slave);
2780        if (vf < 0)
2781                return actv_ports;
2782
2783        bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2784                   min((int)dev->dev_vfs[vf].n_ports,
2785                       dev->caps.num_ports));
2786
2787        return actv_ports;
2788}
2789EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2790
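/* Convert a slave-visible port number to the physical port number.  Worked
 * example (illustrative): a VF whose only active physical port is port 2 sees
 * a single port, so only port 1 is valid (m == 1); then n == 1 and the
 * function returns 2.
 */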
2791int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2792{
2793        unsigned n;
2794        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2795        unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2796
2797        if (port <= 0 || port > m)
2798                return -EINVAL;
2799
2800        n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2801        if (port <= n)
2802                port = n + 1;
2803
2804        return port;
2805}
2806EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2807
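/* The inverse mapping: translate a physical port number into the slave's own
 * port numbering, or return -1 if the slave does not own that port.  In the
 * example above, physical port 2 translates back to slave port 2 - 1 == 1.
 */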
2808int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2809{
2810        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2811        if (test_bit(port - 1, actv_ports.ports))
2812                return port -
2813                        find_first_bit(actv_ports.ports, dev->caps.num_ports);
2814
2815        return -1;
2816}
2817EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2818
2819struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2820                                                   int port)
2821{
2822        unsigned i;
2823        struct mlx4_slaves_pport slaves_pport;
2824
2825        bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2826
2827        if (port <= 0 || port > dev->caps.num_ports)
2828                return slaves_pport;
2829
2830        for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2831                struct mlx4_active_ports actv_ports =
2832                        mlx4_get_active_ports(dev, i);
2833                if (test_bit(port - 1, actv_ports.ports))
2834                        set_bit(i, slaves_pport.slaves);
2835        }
2836
2837        return slaves_pport;
2838}
2839EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2840
2841struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2842                struct mlx4_dev *dev,
2843                const struct mlx4_active_ports *crit_ports)
2844{
2845        unsigned i;
2846        struct mlx4_slaves_pport slaves_pport;
2847
2848        bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2849
2850        for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2851                struct mlx4_active_ports actv_ports =
2852                        mlx4_get_active_ports(dev, i);
2853                if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2854                                 dev->caps.num_ports))
2855                        set_bit(i, slaves_pport.slaves);
2856        }
2857
2858        return slaves_pport;
2859}
2860EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2861
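/* Clamp @port into the range of ports that @slave actually owns.  E.g.
 * (illustrative) for a slave owning physical ports 1 and 2, min_port == 1 and
 * max_port == 3, so a request for port 3 is clamped to port 2.
 */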
2862static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2863{
2864        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2865        int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2866                        + 1;
2867        int max_port = min_port +
2868                bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2869
2870        if (port < min_port)
2871                port = min_port;
2872        else if (port >= max_port)
2873                port = max_port - 1;
2874
2875        return port;
2876}
2877
2878static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
2879                              int max_tx_rate)
2880{
2881        int i;
2882        int err;
2883        struct mlx4_qos_manager *port_qos;
2884        struct mlx4_dev *dev = &priv->dev;
2885        struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
2886
2887        port_qos = &priv->mfunc.master.qos_ctl[port];
2888        memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
2889
2890        if (slave > port_qos->num_of_qos_vfs) {
2891                mlx4_info(dev, "No available VPP resources for this VF\n");
2892                return -EINVAL;
2893        }
2894
2895        /* A query for the default QoS values from Vport 0 is needed */
2896        err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
2897        if (err) {
2898                mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
2899                return err;
2900        }
2901
2902        for (i = 0; i < MLX4_NUM_UP; i++) {
2903                if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
2904                        vpp_qos[i].max_avg_bw = max_tx_rate;
2905                        vpp_qos[i].enable = 1;
2906                } else {
2907                        /* If the user supplied tx_rate == 0, no rate limit
2908                         * configuration is required, so we leave the
2909                         * value of max_avg_bw as queried from Vport 0.
2910                         */
2911                        vpp_qos[i].enable = 0;
2912                }
2913        }
2914
2915        err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
2916        if (err) {
2917                mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
2918                return err;
2919        }
2920
2921        return 0;
2922}
2923
2924static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
2925                                        struct mlx4_vport_state *vf_admin)
2926{
2927        struct mlx4_qos_manager *info;
2928        struct mlx4_priv *priv = mlx4_priv(dev);
2929
2930        if (!mlx4_is_master(dev) ||
2931            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2932                return false;
2933
2934        info = &priv->mfunc.master.qos_ctl[port];
2935
2936        if (vf_admin->default_vlan != MLX4_VGT &&
2937            test_bit(vf_admin->default_qos, info->priority_bm))
2938                return true;
2939
2940        return false;
2941}
2942
2943static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
2944                                       struct mlx4_vport_state *vf_admin,
2945                                       int vlan, int qos)
2946{
2947        struct mlx4_vport_state dummy_admin = {0};
2948
2949        if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
2950            !vf_admin->tx_rate)
2951                return true;
2952
2953        dummy_admin.default_qos = qos;
2954        dummy_admin.default_vlan = vlan;
2955
2956        /* The VF wants to move to another VST state which is valid with the
2957         * current rate limit: either a different default vlan in VST or
2958         * another supported QoS priority. Otherwise we don't allow this
2959         * change while the TX rate is still configured.
2960         */
2961        if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
2962                return true;
2963
2964        mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
2965                  (vlan == MLX4_VGT) ? "VGT" : "VST");
2966
2967        if (vlan != MLX4_VGT)
2968                mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
2969
2970        mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
2971
2972        return false;
2973}
2974
2975int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
2976{
2977        struct mlx4_priv *priv = mlx4_priv(dev);
2978        struct mlx4_vport_state *s_info;
2979        int slave;
2980
2981        if (!mlx4_is_master(dev))
2982                return -EPROTONOSUPPORT;
2983
2984        if (is_multicast_ether_addr(mac))
2985                return -EINVAL;
2986
2987        slave = mlx4_get_slave_indx(dev, vf);
2988        if (slave < 0)
2989                return -EINVAL;
2990
2991        port = mlx4_slaves_closest_port(dev, slave, port);
2992        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2993
2994        if (s_info->spoofchk && is_zero_ether_addr(mac)) {
2995                mlx4_info(dev, "MAC invalidation is not allowed when spoofchk is on\n");
2996                return -EPERM;
2997        }
2998
2999        s_info->mac = ether_addr_to_u64(mac);
3000        mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
3001                  vf, port, s_info->mac);
3002        return 0;
3003}
3004EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
3005
3006
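/* Administrative VST/VGT configuration for a VF.  This is typically reached
 * from the PF's .ndo_set_vf_vlan callback; a hedged example of the
 * corresponding userspace request (assuming the mlx4_en netdev is eth0):
 *
 *	ip link set dev eth0 vf 0 vlan 100 qos 3
 *
 * which maps to mlx4_set_vf_vlan(dev, port, 0, 100, 3, htons(ETH_P_8021Q)).
 * Passing vlan == 0 and qos == 0 switches the VF back to VGT mode.
 */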
3007int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
3008                     __be16 proto)
3009{
3010        struct mlx4_priv *priv = mlx4_priv(dev);
3011        struct mlx4_vport_state *vf_admin;
3012        struct mlx4_slave_state *slave_state;
3013        struct mlx4_vport_oper_state *vf_oper;
3014        int slave;
3015
3016        if ((!mlx4_is_master(dev)) ||
3017            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
3018                return -EPROTONOSUPPORT;
3019
3020        if ((vlan > 4095) || (qos > 7))
3021                return -EINVAL;
3022
3023        if (proto == htons(ETH_P_8021AD) &&
3024            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
3025                return -EPROTONOSUPPORT;
3026
3027        if (proto != htons(ETH_P_8021Q) &&
3028            proto != htons(ETH_P_8021AD))
3029                return -EINVAL;
3030
3031        if ((proto == htons(ETH_P_8021AD)) &&
3032            ((vlan == 0) || (vlan == MLX4_VGT)))
3033                return -EINVAL;
3034
3035        slave = mlx4_get_slave_indx(dev, vf);
3036        if (slave < 0)
3037                return -EINVAL;
3038
3039        slave_state = &priv->mfunc.master.slave_state[slave];
3040        if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
3041            (!slave_state->vst_qinq_supported)) {
3042                mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
3043                return -EPROTONOSUPPORT;
3044        }
3045        port = mlx4_slaves_closest_port(dev, slave, port);
3046        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3047        vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3048
3049        if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
3050                return -EPERM;
3051
3052        if ((0 == vlan) && (0 == qos))
3053                vf_admin->default_vlan = MLX4_VGT;
3054        else
3055                vf_admin->default_vlan = vlan;
3056        vf_admin->default_qos = qos;
3057        vf_admin->vlan_proto = proto;
3058
3059        /* If a rate was configured prior to VST, we saved the configured
3060         * rate in vf_admin->tx_rate; now, if the priority is supported, we
3061         * enforce the QoS.
3062         */
3062        if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
3063            vf_admin->tx_rate)
3064                vf_admin->qos_vport = slave;
3065
3066        /* Try to activate the new vf state without a restart;
3067         * this is not supported while moving to VST QinQ mode.
3068         */
3069        if ((proto == htons(ETH_P_8021AD) &&
3070             vf_oper->state.vlan_proto != proto) ||
3071            mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3072                mlx4_info(dev,
3073                          "updating vf %d port %d config will take effect on next VF restart\n",
3074                          vf, port);
3075        return 0;
3076}
3077EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
3078
3079int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
3080                     int max_tx_rate)
3081{
3082        int err;
3083        int slave;
3084        struct mlx4_vport_state *vf_admin;
3085        struct mlx4_priv *priv = mlx4_priv(dev);
3086
3087        if (!mlx4_is_master(dev) ||
3088            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
3089                return -EPROTONOSUPPORT;
3090
3091        if (min_tx_rate) {
3092                mlx4_info(dev, "Minimum BW share not supported\n");
3093                return -EPROTONOSUPPORT;
3094        }
3095
3096        slave = mlx4_get_slave_indx(dev, vf);
3097        if (slave < 0)
3098                return -EINVAL;
3099
3100        port = mlx4_slaves_closest_port(dev, slave, port);
3101        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3102
3103        err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
3104        if (err) {
3105                mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
3106                          max_tx_rate);
3107                return err;
3108        }
3109
3110        vf_admin->tx_rate = max_tx_rate;
3111        /* If the VF is not in a supported mode (VST with a supported prio),
3112         * we do not change the vport configuration for its QPs, but save
3113         * the rate, so it will be enforced when the VF moves to a supported
3114         * mode next time.
3115         */
3116        if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
3117                mlx4_info(dev,
3118                          "rate set for VF %d when not in valid state\n", vf);
3119
3120                if (vf_admin->default_vlan != MLX4_VGT)
3121                        mlx4_info(dev, "VST priority not supported by QoS\n");
3122                else
3123                        mlx4_info(dev, "VF in VGT mode (needed VST)\n");
3124
3125                mlx4_info(dev,
3126                          "rate %d take affect when VF moves to valid state\n",
3127                          max_tx_rate);
3128                return 0;
3129        }
3130
3131        /* If the user sets rate 0, assign the default vport for its QPs */
3132        vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
3133
3134        if (priv->mfunc.master.slave_state[slave].active &&
3135            dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
3136                mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
3137
3138        return 0;
3139}
3140EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
3141
3142/* mlx4_get_slave_default_vlan -
3143 * return true if the slave is in VST mode (i.e. has a default vlan);
3144 * if VST, also return the vlan & qos (if the pointers are not NULL)
3145 */
3146bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
3147                                 u16 *vlan, u8 *qos)
3148{
3149        struct mlx4_vport_oper_state *vp_oper;
3150        struct mlx4_priv *priv;
3151
3152        priv = mlx4_priv(dev);
3153        port = mlx4_slaves_closest_port(dev, slave, port);
3154        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3155
3156        if (MLX4_VGT != vp_oper->state.default_vlan) {
3157                if (vlan)
3158                        *vlan = vp_oper->state.default_vlan;
3159                if (qos)
3160                        *qos = vp_oper->state.default_qos;
3161                return true;
3162        }
3163        return false;
3164}
3165EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
3166
3167int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
3168{
3169        struct mlx4_priv *priv = mlx4_priv(dev);
3170        struct mlx4_vport_state *s_info;
3171        int slave;
3172        u8 mac[ETH_ALEN];
3173
3174        if ((!mlx4_is_master(dev)) ||
3175            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
3176                return -EPROTONOSUPPORT;
3177
3178        slave = mlx4_get_slave_indx(dev, vf);
3179        if (slave < 0)
3180                return -EINVAL;
3181
3182        port = mlx4_slaves_closest_port(dev, slave, port);
3183        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3184
3185        u64_to_ether_addr(s_info->mac, mac);
3186        if (setting && !is_valid_ether_addr(mac)) {
3187                mlx4_info(dev, "Illegal MAC with spoofchk\n");
3188                return -EPERM;
3189        }
3190
3191        s_info->spoofchk = setting;
3192
3193        return 0;
3194}
3195EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
3196
3197int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
3198{
3199        struct mlx4_priv *priv = mlx4_priv(dev);
3200        struct mlx4_vport_state *s_info;
3201        int slave;
3202
3203        if (!mlx4_is_master(dev))
3204                return -EPROTONOSUPPORT;
3205
3206        slave = mlx4_get_slave_indx(dev, vf);
3207        if (slave < 0)
3208                return -EINVAL;
3209
3210        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3211        ivf->vf = vf;
3212
3213        /* need to convert this open-coded copy into a helper func */
3214        ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
3215        ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
3216        ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
3217        ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
3218        ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
3219        ivf->mac[5] = ((s_info->mac)  & 0xff);
3220
3221        ivf->vlan               = s_info->default_vlan;
3222        ivf->qos                = s_info->default_qos;
3223        ivf->vlan_proto         = s_info->vlan_proto;
3224
3225        if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
3226                ivf->max_tx_rate = s_info->tx_rate;
3227        else
3228                ivf->max_tx_rate = 0;
3229
3230        ivf->min_tx_rate        = 0;
3231        ivf->spoofchk           = s_info->spoofchk;
3232        ivf->linkstate          = s_info->link_state;
3233
3234        return 0;
3235}
3236EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
3237
3238int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
3239{
3240        struct mlx4_priv *priv = mlx4_priv(dev);
3241        struct mlx4_vport_state *s_info;
3242        int slave;
3243        u8 link_stat_event;
3244
3245        slave = mlx4_get_slave_indx(dev, vf);
3246        if (slave < 0)
3247                return -EINVAL;
3248
3249        port = mlx4_slaves_closest_port(dev, slave, port);
3250        switch (link_state) {
3251        case IFLA_VF_LINK_STATE_AUTO:
3252                /* get current link state */
3253                if (!priv->sense.do_sense_port[port])
3254                        link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
3255                else
3256                        link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2257                break;
3258
3259        case IFLA_VF_LINK_STATE_ENABLE:
3260                link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
2261                break;
3262
3263        case IFLA_VF_LINK_STATE_DISABLE:
3264                link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
2265                break;
3266
3267        default:
3268                mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
3269                          link_state, slave, port);
3270                return -EINVAL;
3271        }
3272        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3273        s_info->link_state = link_state;
3274
3275        /* send event */
3276        mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
3277
3278        if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3279                mlx4_dbg(dev,
3280                         "updating vf %d port %d no link state HW enforcement\n",
3281                         vf, port);
3282        return 0;
3283}
3284EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
3285
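/* Read (and optionally reset) a hardware counter set via QUERY_IF_STAT.  Note
 * that the rx/tx fields stay big-endian and are accumulated into
 * @counter_stats, so callers can pass the same structure repeatedly to sum
 * several counter indices (as the VF statistics path is expected to do).
 */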
3286int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
3287                           struct mlx4_counter *counter_stats, int reset)
3288{
3289        struct mlx4_cmd_mailbox *mailbox = NULL;
3290        struct mlx4_counter *tmp_counter;
3291        int err;
3292        u32 if_stat_in_mod;
3293
3294        if (!counter_stats)
3295                return -EINVAL;
3296
3297        if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
3298                return 0;
3299
3300        mailbox = mlx4_alloc_cmd_mailbox(dev);
3301        if (IS_ERR(mailbox))
3302                return PTR_ERR(mailbox);
3303
3304        memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
3305        if_stat_in_mod = counter_index;
3306        if (reset)
3307                if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
3308        err = mlx4_cmd_box(dev, 0, mailbox->dma,
3309                           if_stat_in_mod, 0,
3310                           MLX4_CMD_QUERY_IF_STAT,
3311                           MLX4_CMD_TIME_CLASS_C,
3312                           MLX4_CMD_NATIVE);
3313        if (err) {
3314                mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
3315                         __func__, counter_index);
3316                goto if_stat_out;
3317        }
3318        tmp_counter = (struct mlx4_counter *)mailbox->buf;
3319        counter_stats->counter_mode = tmp_counter->counter_mode;
3320        if (counter_stats->counter_mode == 0) {
3321                counter_stats->rx_frames =
3322                        cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
3323                                    be64_to_cpu(tmp_counter->rx_frames));
3324                counter_stats->tx_frames =
3325                        cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
3326                                    be64_to_cpu(tmp_counter->tx_frames));
3327                counter_stats->rx_bytes =
3328                        cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
3329                                    be64_to_cpu(tmp_counter->rx_bytes));
3330                counter_stats->tx_bytes =
3331                        cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
3332                                    be64_to_cpu(tmp_counter->tx_bytes));
3333        }
3334
3335if_stat_out:
3336        mlx4_free_cmd_mailbox(dev, mailbox);
3337
3338        return err;
3339}
3340EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
3341
3342int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
3343                      struct ifla_vf_stats *vf_stats)
3344{
3345        struct mlx4_counter tmp_vf_stats;
3346        int slave;
3347        int err = 0;
3348
3349        if (!vf_stats)
3350                return -EINVAL;
3351
3352        if (!mlx4_is_master(dev))
3353                return -EPROTONOSUPPORT;
3354
3355        slave = mlx4_get_slave_indx(dev, vf_idx);
3356        if (slave < 0)
3357                return -EINVAL;
3358
3359        port = mlx4_slaves_closest_port(dev, slave, port);
3360        err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
3361        if (!err && tmp_vf_stats.counter_mode == 0) {
3362                vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
3363                vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
3364                vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
3365                vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
3366        }
3367
3368        return err;
3369}
3370EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
3371
3372int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
3373{
3374        struct mlx4_priv *priv = mlx4_priv(dev);
3375
3376        if (slave < 1 || slave >= dev->num_slaves ||
3377            port < 1 || port > MLX4_MAX_PORTS)
3378                return 0;
3379
3380        return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
3381                MLX4_VF_SMI_ENABLED;
3382}
3383EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
3384
3385int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
3386{
3387        struct mlx4_priv *priv = mlx4_priv(dev);
3388
3389        if (slave == mlx4_master_func_num(dev))
3390                return 1;
3391
3392        if (slave < 1 || slave >= dev->num_slaves ||
3393            port < 1 || port > MLX4_MAX_PORTS)
3394                return 0;
3395
3396        return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
3397                MLX4_VF_SMI_ENABLED;
3398}
3399EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
3400
3401int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
3402                                 int enabled)
3403{
3404        struct mlx4_priv *priv = mlx4_priv(dev);
3405        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
3406                        &priv->dev, slave);
3407        int min_port = find_first_bit(actv_ports.ports,
3408                                      priv->dev.caps.num_ports) + 1;
3409        int max_port = min_port - 1 +
3410                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
3411
3412        if (slave == mlx4_master_func_num(dev))
3413                return 0;
3414
3415        if (slave < 1 || slave >= dev->num_slaves ||
3416            port < 1 || port > MLX4_MAX_PORTS ||
3417            enabled < 0 || enabled > 1)
3418                return -EINVAL;
3419
3420        if (min_port == max_port && dev->caps.num_ports > 1) {
3421                mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
3422                return -EPROTONOSUPPORT;
3423        }
3424
3425        priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
3426        return 0;
3427}
3428EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
3429