linux/drivers/net/ethernet/mellanox/mlx4/cmd.c
<<
>>
Prefs
   1/*
   2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
   4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/sched.h>
  36#include <linux/slab.h>
  37#include <linux/export.h>
  38#include <linux/pci.h>
  39#include <linux/errno.h>
  40
  41#include <linux/mlx4/cmd.h>
  42#include <linux/mlx4/device.h>
  43#include <linux/semaphore.h>
  44#include <rdma/ib_smi.h>
  45#include <linux/delay.h>
  46#include <linux/etherdevice.h>
  47
  48#include <asm/io.h>
  49
  50#include "mlx4.h"
  51#include "fw.h"
  52#include "fw_qos.h"
  53#include "mlx4_stats.h"
  54
  55#define CMD_POLL_TOKEN 0xffff
  56#define INBOX_MASK      0xffffffffffffff00ULL
  57
  58#define CMD_CHAN_VER 1
  59#define CMD_CHAN_IF_REV 1
  60
  61enum {
  62        /* command completed successfully: */
  63        CMD_STAT_OK             = 0x00,
  64        /* Internal error (such as a bus error) occurred while processing command: */
  65        CMD_STAT_INTERNAL_ERR   = 0x01,
  66        /* Operation/command not supported or opcode modifier not supported: */
  67        CMD_STAT_BAD_OP         = 0x02,
  68        /* Parameter not supported or parameter out of range: */
  69        CMD_STAT_BAD_PARAM      = 0x03,
  70        /* System not enabled or bad system state: */
  71        CMD_STAT_BAD_SYS_STATE  = 0x04,
  72        /* Attempt to access reserved or unallocaterd resource: */
  73        CMD_STAT_BAD_RESOURCE   = 0x05,
  74        /* Requested resource is currently executing a command, or is otherwise busy: */
  75        CMD_STAT_RESOURCE_BUSY  = 0x06,
  76        /* Required capability exceeds device limits: */
  77        CMD_STAT_EXCEED_LIM     = 0x08,
  78        /* Resource is not in the appropriate state or ownership: */
  79        CMD_STAT_BAD_RES_STATE  = 0x09,
  80        /* Index out of range: */
  81        CMD_STAT_BAD_INDEX      = 0x0a,
  82        /* FW image corrupted: */
  83        CMD_STAT_BAD_NVMEM      = 0x0b,
  84        /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
  85        CMD_STAT_ICM_ERROR      = 0x0c,
  86        /* Attempt to modify a QP/EE which is not in the presumed state: */
  87        CMD_STAT_BAD_QP_STATE   = 0x10,
  88        /* Bad segment parameters (Address/Size): */
  89        CMD_STAT_BAD_SEG_PARAM  = 0x20,
  90        /* Memory Region has Memory Windows bound to: */
  91        CMD_STAT_REG_BOUND      = 0x21,
  92        /* HCA local attached memory not present: */
  93        CMD_STAT_LAM_NOT_PRE    = 0x22,
  94        /* Bad management packet (silently discarded): */
  95        CMD_STAT_BAD_PKT        = 0x30,
  96        /* More outstanding CQEs in CQ than new CQ size: */
  97        CMD_STAT_BAD_SIZE       = 0x40,
  98        /* Multi Function device support required: */
  99        CMD_STAT_MULTI_FUNC_REQ = 0x50,
 100};
 101
 102enum {
 103        HCR_IN_PARAM_OFFSET     = 0x00,
 104        HCR_IN_MODIFIER_OFFSET  = 0x08,
 105        HCR_OUT_PARAM_OFFSET    = 0x0c,
 106        HCR_TOKEN_OFFSET        = 0x14,
 107        HCR_STATUS_OFFSET       = 0x18,
 108
 109        HCR_OPMOD_SHIFT         = 12,
 110        HCR_T_BIT               = 21,
 111        HCR_E_BIT               = 22,
 112        HCR_GO_BIT              = 23
 113};
 114
 115enum {
 116        GO_BIT_TIMEOUT_MSECS    = 10000
 117};
 118
 119enum mlx4_vlan_transition {
 120        MLX4_VLAN_TRANSITION_VST_VST = 0,
 121        MLX4_VLAN_TRANSITION_VST_VGT = 1,
 122        MLX4_VLAN_TRANSITION_VGT_VST = 2,
 123        MLX4_VLAN_TRANSITION_VGT_VGT = 3,
 124};
 125
 126
 127struct mlx4_cmd_context {
 128        struct completion       done;
 129        int                     result;
 130        int                     next;
 131        u64                     out_param;
 132        u16                     token;
 133        u8                      fw_status;
 134};
 135
 136static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
 137                                    struct mlx4_vhcr_cmd *in_vhcr);
 138
 139static int mlx4_status_to_errno(u8 status)
 140{
 141        static const int trans_table[] = {
 142                [CMD_STAT_INTERNAL_ERR]   = -EIO,
 143                [CMD_STAT_BAD_OP]         = -EPERM,
 144                [CMD_STAT_BAD_PARAM]      = -EINVAL,
 145                [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
 146                [CMD_STAT_BAD_RESOURCE]   = -EBADF,
 147                [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
 148                [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
 149                [CMD_STAT_BAD_RES_STATE]  = -EBADF,
 150                [CMD_STAT_BAD_INDEX]      = -EBADF,
 151                [CMD_STAT_BAD_NVMEM]      = -EFAULT,
 152                [CMD_STAT_ICM_ERROR]      = -ENFILE,
 153                [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
 154                [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
 155                [CMD_STAT_REG_BOUND]      = -EBUSY,
 156                [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
 157                [CMD_STAT_BAD_PKT]        = -EINVAL,
 158                [CMD_STAT_BAD_SIZE]       = -ENOMEM,
 159                [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
 160        };
 161
 162        if (status >= ARRAY_SIZE(trans_table) ||
 163            (status != CMD_STAT_OK && trans_table[status] == 0))
 164                return -EIO;
 165
 166        return trans_table[status];
 167}
 168
 169static u8 mlx4_errno_to_status(int errno)
 170{
 171        switch (errno) {
 172        case -EPERM:
 173                return CMD_STAT_BAD_OP;
 174        case -EINVAL:
 175                return CMD_STAT_BAD_PARAM;
 176        case -ENXIO:
 177                return CMD_STAT_BAD_SYS_STATE;
 178        case -EBUSY:
 179                return CMD_STAT_RESOURCE_BUSY;
 180        case -ENOMEM:
 181                return CMD_STAT_EXCEED_LIM;
 182        case -ENFILE:
 183                return CMD_STAT_ICM_ERROR;
 184        default:
 185                return CMD_STAT_INTERNAL_ERR;
 186        }
 187}
 188
 189static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
 190                                       u8 op_modifier)
 191{
 192        switch (op) {
 193        case MLX4_CMD_UNMAP_ICM:
 194        case MLX4_CMD_UNMAP_ICM_AUX:
 195        case MLX4_CMD_UNMAP_FA:
 196        case MLX4_CMD_2RST_QP:
 197        case MLX4_CMD_HW2SW_EQ:
 198        case MLX4_CMD_HW2SW_CQ:
 199        case MLX4_CMD_HW2SW_SRQ:
 200        case MLX4_CMD_HW2SW_MPT:
 201        case MLX4_CMD_CLOSE_HCA:
 202        case MLX4_QP_FLOW_STEERING_DETACH:
 203        case MLX4_CMD_FREE_RES:
 204        case MLX4_CMD_CLOSE_PORT:
 205                return CMD_STAT_OK;
 206
 207        case MLX4_CMD_QP_ATTACH:
 208                /* On Detach case return success */
 209                if (op_modifier == 0)
 210                        return CMD_STAT_OK;
 211                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 212
 213        default:
 214                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 215        }
 216}
 217
 218static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
 219{
 220        /* Any error during the closing commands below is considered fatal */
 221        if (op == MLX4_CMD_CLOSE_HCA ||
 222            op == MLX4_CMD_HW2SW_EQ ||
 223            op == MLX4_CMD_HW2SW_CQ ||
 224            op == MLX4_CMD_2RST_QP ||
 225            op == MLX4_CMD_HW2SW_SRQ ||
 226            op == MLX4_CMD_SYNC_TPT ||
 227            op == MLX4_CMD_UNMAP_ICM ||
 228            op == MLX4_CMD_UNMAP_ICM_AUX ||
 229            op == MLX4_CMD_UNMAP_FA)
 230                return 1;
 231        /* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
 232          * CMD_STAT_REG_BOUND.
 233          * This status indicates that memory region has memory windows bound to it
 234          * which may result from invalid user space usage and is not fatal.
 235          */
 236        if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
 237                return 1;
 238        return 0;
 239}
 240
 241static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
 242                               int err)
 243{
 244        /* Only if reset flow is really active return code is based on
 245          * command, otherwise current error code is returned.
 246          */
 247        if (mlx4_internal_err_reset) {
 248                mlx4_enter_error_state(dev->persist);
 249                err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 250        }
 251
 252        return err;
 253}
 254
 255static int comm_pending(struct mlx4_dev *dev)
 256{
 257        struct mlx4_priv *priv = mlx4_priv(dev);
 258        u32 status = readl(&priv->mfunc.comm->slave_read);
 259
 260        return (swab32(status) >> 31) != priv->cmd.comm_toggle;
 261}
 262
 263static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
 264{
 265        struct mlx4_priv *priv = mlx4_priv(dev);
 266        u32 val;
 267
 268        /* To avoid writing to unknown addresses after the device state was
 269         * changed to internal error and the function was rest,
 270         * check the INTERNAL_ERROR flag which is updated under
 271         * device_state_mutex lock.
 272         */
 273        mutex_lock(&dev->persist->device_state_mutex);
 274
 275        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
 276                mutex_unlock(&dev->persist->device_state_mutex);
 277                return -EIO;
 278        }
 279
 280        priv->cmd.comm_toggle ^= 1;
 281        val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
 282        __raw_writel((__force u32) cpu_to_be32(val),
 283                     &priv->mfunc.comm->slave_write);
 284        mutex_unlock(&dev->persist->device_state_mutex);
 285        return 0;
 286}
 287
 288static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
 289                       unsigned long timeout)
 290{
 291        struct mlx4_priv *priv = mlx4_priv(dev);
 292        unsigned long end;
 293        int err = 0;
 294        int ret_from_pending = 0;
 295
 296        /* First, verify that the master reports correct status */
 297        if (comm_pending(dev)) {
 298                mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
 299                          priv->cmd.comm_toggle, cmd);
 300                return -EAGAIN;
 301        }
 302
 303        /* Write command */
 304        down(&priv->cmd.poll_sem);
 305        if (mlx4_comm_cmd_post(dev, cmd, param)) {
 306                /* Only in case the device state is INTERNAL_ERROR,
 307                 * mlx4_comm_cmd_post returns with an error
 308                 */
 309                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 310                goto out;
 311        }
 312
 313        end = msecs_to_jiffies(timeout) + jiffies;
 314        while (comm_pending(dev) && time_before(jiffies, end))
 315                cond_resched();
 316        ret_from_pending = comm_pending(dev);
 317        if (ret_from_pending) {
 318                /* check if the slave is trying to boot in the middle of
 319                 * FLR process. The only non-zero result in the RESET command
 320                 * is MLX4_DELAY_RESET_SLAVE*/
 321                if ((MLX4_COMM_CMD_RESET == cmd)) {
 322                        err = MLX4_DELAY_RESET_SLAVE;
 323                        goto out;
 324                } else {
 325                        mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
 326                                  cmd);
 327                        err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 328                }
 329        }
 330
 331        if (err)
 332                mlx4_enter_error_state(dev->persist);
 333out:
 334        up(&priv->cmd.poll_sem);
 335        return err;
 336}
 337
 338static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
 339                              u16 param, u16 op, unsigned long timeout)
 340{
 341        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 342        struct mlx4_cmd_context *context;
 343        unsigned long end;
 344        int err = 0;
 345
 346        down(&cmd->event_sem);
 347
 348        spin_lock(&cmd->context_lock);
 349        BUG_ON(cmd->free_head < 0);
 350        context = &cmd->context[cmd->free_head];
 351        context->token += cmd->token_mask + 1;
 352        cmd->free_head = context->next;
 353        spin_unlock(&cmd->context_lock);
 354
 355        reinit_completion(&context->done);
 356
 357        if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
 358                /* Only in case the device state is INTERNAL_ERROR,
 359                 * mlx4_comm_cmd_post returns with an error
 360                 */
 361                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 362                goto out;
 363        }
 364
 365        if (!wait_for_completion_timeout(&context->done,
 366                                         msecs_to_jiffies(timeout))) {
 367                mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
 368                          vhcr_cmd, op);
 369                goto out_reset;
 370        }
 371
 372        err = context->result;
 373        if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
 374                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 375                         vhcr_cmd, context->fw_status);
 376                if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
 377                        goto out_reset;
 378        }
 379
 380        /* wait for comm channel ready
 381         * this is necessary for prevention the race
 382         * when switching between event to polling mode
 383         * Skipping this section in case the device is in FATAL_ERROR state,
 384         * In this state, no commands are sent via the comm channel until
 385         * the device has returned from reset.
 386         */
 387        if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
 388                end = msecs_to_jiffies(timeout) + jiffies;
 389                while (comm_pending(dev) && time_before(jiffies, end))
 390                        cond_resched();
 391        }
 392        goto out;
 393
 394out_reset:
 395        err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 396        mlx4_enter_error_state(dev->persist);
 397out:
 398        spin_lock(&cmd->context_lock);
 399        context->next = cmd->free_head;
 400        cmd->free_head = context - cmd->context;
 401        spin_unlock(&cmd->context_lock);
 402
 403        up(&cmd->event_sem);
 404        return err;
 405}
 406
 407int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
 408                  u16 op, unsigned long timeout)
 409{
 410        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 411                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 412
 413        if (mlx4_priv(dev)->cmd.use_events)
 414                return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
 415        return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
 416}
 417
 418static int cmd_pending(struct mlx4_dev *dev)
 419{
 420        u32 status;
 421
 422        if (pci_channel_offline(dev->persist->pdev))
 423                return -EIO;
 424
 425        status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
 426
 427        return (status & swab32(1 << HCR_GO_BIT)) ||
 428                (mlx4_priv(dev)->cmd.toggle ==
 429                 !!(status & swab32(1 << HCR_T_BIT)));
 430}
 431
 432static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
 433                         u32 in_modifier, u8 op_modifier, u16 op, u16 token,
 434                         int event)
 435{
 436        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 437        u32 __iomem *hcr = cmd->hcr;
 438        int ret = -EIO;
 439        unsigned long end;
 440
 441        mutex_lock(&dev->persist->device_state_mutex);
 442        /* To avoid writing to unknown addresses after the device state was
 443          * changed to internal error and the chip was reset,
 444          * check the INTERNAL_ERROR flag which is updated under
 445          * device_state_mutex lock.
 446          */
 447        if (pci_channel_offline(dev->persist->pdev) ||
 448            (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
 449                /*
 450                 * Device is going through error recovery
 451                 * and cannot accept commands.
 452                 */
 453                goto out;
 454        }
 455
 456        end = jiffies;
 457        if (event)
 458                end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
 459
 460        while (cmd_pending(dev)) {
 461                if (pci_channel_offline(dev->persist->pdev)) {
 462                        /*
 463                         * Device is going through error recovery
 464                         * and cannot accept commands.
 465                         */
 466                        goto out;
 467                }
 468
 469                if (time_after_eq(jiffies, end)) {
 470                        mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
 471                        goto out;
 472                }
 473                cond_resched();
 474        }
 475
 476        /*
 477         * We use writel (instead of something like memcpy_toio)
 478         * because writes of less than 32 bits to the HCR don't work
 479         * (and some architectures such as ia64 implement memcpy_toio
 480         * in terms of writeb).
 481         */
 482        __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
 483        __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
 484        __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
 485        __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
 486        __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
 487        __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);
 488
 489        /* __raw_writel may not order writes. */
 490        wmb();
 491
 492        __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
 493                                               (cmd->toggle << HCR_T_BIT)       |
 494                                               (event ? (1 << HCR_E_BIT) : 0)   |
 495                                               (op_modifier << HCR_OPMOD_SHIFT) |
 496                                               op), hcr + 6);
 497
 498        cmd->toggle = cmd->toggle ^ 1;
 499
 500        ret = 0;
 501
 502out:
 503        if (ret)
 504                mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
 505                          op, ret, in_param, in_modifier, op_modifier);
 506        mutex_unlock(&dev->persist->device_state_mutex);
 507
 508        return ret;
 509}
 510
 511static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 512                          int out_is_imm, u32 in_modifier, u8 op_modifier,
 513                          u16 op, unsigned long timeout)
 514{
 515        struct mlx4_priv *priv = mlx4_priv(dev);
 516        struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
 517        int ret;
 518
 519        mutex_lock(&priv->cmd.slave_cmd_mutex);
 520
 521        vhcr->in_param = cpu_to_be64(in_param);
 522        vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
 523        vhcr->in_modifier = cpu_to_be32(in_modifier);
 524        vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
 525        vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
 526        vhcr->status = 0;
 527        vhcr->flags = !!(priv->cmd.use_events) << 6;
 528
 529        if (mlx4_is_master(dev)) {
 530                ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
 531                if (!ret) {
 532                        if (out_is_imm) {
 533                                if (out_param)
 534                                        *out_param =
 535                                                be64_to_cpu(vhcr->out_param);
 536                                else {
 537                                        mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 538                                                 op);
 539                                        vhcr->status = CMD_STAT_BAD_PARAM;
 540                                }
 541                        }
 542                        ret = mlx4_status_to_errno(vhcr->status);
 543                }
 544                if (ret &&
 545                    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 546                        ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
 547        } else {
 548                ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
 549                                    MLX4_COMM_TIME + timeout);
 550                if (!ret) {
 551                        if (out_is_imm) {
 552                                if (out_param)
 553                                        *out_param =
 554                                                be64_to_cpu(vhcr->out_param);
 555                                else {
 556                                        mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 557                                                 op);
 558                                        vhcr->status = CMD_STAT_BAD_PARAM;
 559                                }
 560                        }
 561                        ret = mlx4_status_to_errno(vhcr->status);
 562                } else {
 563                        if (dev->persist->state &
 564                            MLX4_DEVICE_STATE_INTERNAL_ERROR)
 565                                ret = mlx4_internal_err_ret_value(dev, op,
 566                                                                  op_modifier);
 567                        else
 568                                mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
 569                }
 570        }
 571
 572        mutex_unlock(&priv->cmd.slave_cmd_mutex);
 573        return ret;
 574}
 575
 576static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 577                         int out_is_imm, u32 in_modifier, u8 op_modifier,
 578                         u16 op, unsigned long timeout)
 579{
 580        struct mlx4_priv *priv = mlx4_priv(dev);
 581        void __iomem *hcr = priv->cmd.hcr;
 582        int err = 0;
 583        unsigned long end;
 584        u32 stat;
 585
 586        down(&priv->cmd.poll_sem);
 587
 588        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
 589                /*
 590                 * Device is going through error recovery
 591                 * and cannot accept commands.
 592                 */
 593                err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 594                goto out;
 595        }
 596
 597        if (out_is_imm && !out_param) {
 598                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 599                         op);
 600                err = -EINVAL;
 601                goto out;
 602        }
 603
 604        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
 605                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
 606        if (err)
 607                goto out_reset;
 608
 609        end = msecs_to_jiffies(timeout) + jiffies;
 610        while (cmd_pending(dev) && time_before(jiffies, end)) {
 611                if (pci_channel_offline(dev->persist->pdev)) {
 612                        /*
 613                         * Device is going through error recovery
 614                         * and cannot accept commands.
 615                         */
 616                        err = -EIO;
 617                        goto out_reset;
 618                }
 619
 620                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
 621                        err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 622                        goto out;
 623                }
 624
 625                cond_resched();
 626        }
 627
 628        if (cmd_pending(dev)) {
 629                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
 630                          op);
 631                err = -EIO;
 632                goto out_reset;
 633        }
 634
 635        if (out_is_imm)
 636                *out_param =
 637                        (u64) be32_to_cpu((__force __be32)
 638                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
 639                        (u64) be32_to_cpu((__force __be32)
 640                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
 641        stat = be32_to_cpu((__force __be32)
 642                           __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
 643        err = mlx4_status_to_errno(stat);
 644        if (err) {
 645                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 646                         op, stat);
 647                if (mlx4_closing_cmd_fatal_error(op, stat))
 648                        goto out_reset;
 649                goto out;
 650        }
 651
 652out_reset:
 653        if (err)
 654                err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
 655out:
 656        up(&priv->cmd.poll_sem);
 657        return err;
 658}
 659
 660void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
 661{
 662        struct mlx4_priv *priv = mlx4_priv(dev);
 663        struct mlx4_cmd_context *context =
 664                &priv->cmd.context[token & priv->cmd.token_mask];
 665
 666        /* previously timed out command completing at long last */
 667        if (token != context->token)
 668                return;
 669
 670        context->fw_status = status;
 671        context->result    = mlx4_status_to_errno(status);
 672        context->out_param = out_param;
 673
 674        complete(&context->done);
 675}
 676
 677static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 678                         int out_is_imm, u32 in_modifier, u8 op_modifier,
 679                         u16 op, unsigned long timeout)
 680{
 681        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 682        struct mlx4_cmd_context *context;
 683        long ret_wait;
 684        int err = 0;
 685
 686        down(&cmd->event_sem);
 687
 688        spin_lock(&cmd->context_lock);
 689        BUG_ON(cmd->free_head < 0);
 690        context = &cmd->context[cmd->free_head];
 691        context->token += cmd->token_mask + 1;
 692        cmd->free_head = context->next;
 693        spin_unlock(&cmd->context_lock);
 694
 695        if (out_is_imm && !out_param) {
 696                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 697                         op);
 698                err = -EINVAL;
 699                goto out;
 700        }
 701
 702        reinit_completion(&context->done);
 703
 704        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
 705                            in_modifier, op_modifier, op, context->token, 1);
 706        if (err)
 707                goto out_reset;
 708
 709        if (op == MLX4_CMD_SENSE_PORT) {
 710                ret_wait =
 711                        wait_for_completion_interruptible_timeout(&context->done,
 712                                                                  msecs_to_jiffies(timeout));
 713                if (ret_wait < 0) {
 714                        context->fw_status = 0;
 715                        context->out_param = 0;
 716                        context->result = 0;
 717                }
 718        } else {
 719                ret_wait = (long)wait_for_completion_timeout(&context->done,
 720                                                             msecs_to_jiffies(timeout));
 721        }
 722        if (!ret_wait) {
 723                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
 724                          op);
 725                if (op == MLX4_CMD_NOP) {
 726                        err = -EBUSY;
 727                        goto out;
 728                } else {
 729                        err = -EIO;
 730                        goto out_reset;
 731                }
 732        }
 733
 734        err = context->result;
 735        if (err) {
 736                /* Since we do not want to have this error message always
 737                 * displayed at driver start when there are ConnectX2 HCAs
 738                 * on the host, we deprecate the error message for this
 739                 * specific command/input_mod/opcode_mod/fw-status to be debug.
 740                 */
 741                if (op == MLX4_CMD_SET_PORT &&
 742                    (in_modifier == 1 || in_modifier == 2) &&
 743                    op_modifier == MLX4_SET_PORT_IB_OPCODE &&
 744                    context->fw_status == CMD_STAT_BAD_SIZE)
 745                        mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
 746                                 op, context->fw_status);
 747                else
 748                        mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 749                                 op, context->fw_status);
 750                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 751                        err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 752                else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
 753                        goto out_reset;
 754
 755                goto out;
 756        }
 757
 758        if (out_is_imm)
 759                *out_param = context->out_param;
 760
 761out_reset:
 762        if (err)
 763                err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
 764out:
 765        spin_lock(&cmd->context_lock);
 766        context->next = cmd->free_head;
 767        cmd->free_head = context - cmd->context;
 768        spin_unlock(&cmd->context_lock);
 769
 770        up(&cmd->event_sem);
 771        return err;
 772}
 773
 774int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 775               int out_is_imm, u32 in_modifier, u8 op_modifier,
 776               u16 op, unsigned long timeout, int native)
 777{
 778        if (pci_channel_offline(dev->persist->pdev))
 779                return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
 780
 781        if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
 782                int ret;
 783
 784                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 785                        return mlx4_internal_err_ret_value(dev, op,
 786                                                          op_modifier);
 787                down_read(&mlx4_priv(dev)->cmd.switch_sem);
 788                if (mlx4_priv(dev)->cmd.use_events)
 789                        ret = mlx4_cmd_wait(dev, in_param, out_param,
 790                                            out_is_imm, in_modifier,
 791                                            op_modifier, op, timeout);
 792                else
 793                        ret = mlx4_cmd_poll(dev, in_param, out_param,
 794                                            out_is_imm, in_modifier,
 795                                            op_modifier, op, timeout);
 796
 797                up_read(&mlx4_priv(dev)->cmd.switch_sem);
 798                return ret;
 799        }
 800        return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
 801                              in_modifier, op_modifier, op, timeout);
 802}
 803EXPORT_SYMBOL_GPL(__mlx4_cmd);
 804
 805
 806int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
 807{
 808        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
 809                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 810}
 811
 812static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
 813                           int slave, u64 slave_addr,
 814                           int size, int is_read)
 815{
 816        u64 in_param;
 817        u64 out_param;
 818
 819        if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
 820            (slave & ~0x7f) | (size & 0xff)) {
 821                mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
 822                         slave_addr, master_addr, slave, size);
 823                return -EINVAL;
 824        }
 825
 826        if (is_read) {
 827                in_param = (u64) slave | slave_addr;
 828                out_param = (u64) dev->caps.function | master_addr;
 829        } else {
 830                in_param = (u64) dev->caps.function | master_addr;
 831                out_param = (u64) slave | slave_addr;
 832        }
 833
 834        return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
 835                            MLX4_CMD_ACCESS_MEM,
 836                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 837}
 838
 839static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
 840                               struct mlx4_cmd_mailbox *inbox,
 841                               struct mlx4_cmd_mailbox *outbox)
 842{
 843        struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
 844        struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
 845        int err;
 846        int i;
 847
 848        if (index & 0x1f)
 849                return -EINVAL;
 850
 851        in_mad->attr_mod = cpu_to_be32(index / 32);
 852
 853        err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
 854                           MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
 855                           MLX4_CMD_NATIVE);
 856        if (err)
 857                return err;
 858
 859        for (i = 0; i < 32; ++i)
 860                pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
 861
 862        return err;
 863}
 864
 865static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
 866                               struct mlx4_cmd_mailbox *inbox,
 867                               struct mlx4_cmd_mailbox *outbox)
 868{
 869        int i;
 870        int err;
 871
 872        for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
 873                err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
 874                if (err)
 875                        return err;
 876        }
 877
 878        return 0;
 879}
 880#define PORT_CAPABILITY_LOCATION_IN_SMP 20
 881#define PORT_STATE_OFFSET 32
 882
 883static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
 884{
 885        if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
 886                return IB_PORT_ACTIVE;
 887        else
 888                return IB_PORT_DOWN;
 889}
 890
 891static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
 892                                struct mlx4_vhcr *vhcr,
 893                                struct mlx4_cmd_mailbox *inbox,
 894                                struct mlx4_cmd_mailbox *outbox,
 895                                struct mlx4_cmd_info *cmd)
 896{
 897        struct ib_smp *smp = inbox->buf;
 898        u32 index;
 899        u8 port, slave_port;
 900        u8 opcode_modifier;
 901        u16 *table;
 902        int err;
 903        int vidx, pidx;
 904        int network_view;
 905        struct mlx4_priv *priv = mlx4_priv(dev);
 906        struct ib_smp *outsmp = outbox->buf;
 907        __be16 *outtab = (__be16 *)(outsmp->data);
 908        __be32 slave_cap_mask;
 909        __be64 slave_node_guid;
 910
 911        slave_port = vhcr->in_modifier;
 912        port = mlx4_slave_convert_port(dev, slave, slave_port);
 913
 914        /* network-view bit is for driver use only, and should not be passed to FW */
 915        opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
 916        network_view = !!(vhcr->op_modifier & 0x8);
 917
 918        if (smp->base_version == 1 &&
 919            smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
 920            smp->class_version == 1) {
 921                /* host view is paravirtualized */
 922                if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
 923                        if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
 924                                index = be32_to_cpu(smp->attr_mod);
 925                                if (port < 1 || port > dev->caps.num_ports)
 926                                        return -EINVAL;
 927                                table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
 928                                                sizeof(*table) * 32, GFP_KERNEL);
 929
 930                                if (!table)
 931                                        return -ENOMEM;
 932                                /* need to get the full pkey table because the paravirtualized
 933                                 * pkeys may be scattered among several pkey blocks.
 934                                 */
 935                                err = get_full_pkey_table(dev, port, table, inbox, outbox);
 936                                if (!err) {
 937                                        for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
 938                                                pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
 939                                                outtab[vidx % 32] = cpu_to_be16(table[pidx]);
 940                                        }
 941                                }
 942                                kfree(table);
 943                                return err;
 944                        }
 945                        if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
 946                                /*get the slave specific caps:*/
 947                                /*do the command */
 948                                smp->attr_mod = cpu_to_be32(port);
 949                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
 950                                            port, opcode_modifier,
 951                                            vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 952                                /* modify the response for slaves */
 953                                if (!err && slave != mlx4_master_func_num(dev)) {
 954                                        u8 *state = outsmp->data + PORT_STATE_OFFSET;
 955
 956                                        *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
 957                                        slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
 958                                        memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
 959                                }
 960                                return err;
 961                        }
 962                        if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
 963                                __be64 guid = mlx4_get_admin_guid(dev, slave,
 964                                                                  port);
 965
 966                                /* set the PF admin guid to the FW/HW burned
 967                                 * GUID, if it wasn't yet set
 968                                 */
 969                                if (slave == 0 && guid == 0) {
 970                                        smp->attr_mod = 0;
 971                                        err = mlx4_cmd_box(dev,
 972                                                           inbox->dma,
 973                                                           outbox->dma,
 974                                                           vhcr->in_modifier,
 975                                                           opcode_modifier,
 976                                                           vhcr->op,
 977                                                           MLX4_CMD_TIME_CLASS_C,
 978                                                           MLX4_CMD_NATIVE);
 979                                        if (err)
 980                                                return err;
 981                                        mlx4_set_admin_guid(dev,
 982                                                            *(__be64 *)outsmp->
 983                                                            data, slave, port);
 984                                } else {
 985                                        memcpy(outsmp->data, &guid, 8);
 986                                }
 987
 988                                /* clean all other gids */
 989                                memset(outsmp->data + 8, 0, 56);
 990                                return 0;
 991                        }
 992                        if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
 993                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
 994                                             port, opcode_modifier,
 995                                             vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 996                                if (!err) {
 997                                        slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
 998                                        memcpy(outsmp->data + 12, &slave_node_guid, 8);
 999                                }
1000                                return err;
1001                        }
1002                }
1003        }
1004
1005        /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
1006         * These are the MADs used by ib verbs (such as ib_query_gids).
1007         */
1008        if (slave != mlx4_master_func_num(dev) &&
1009            !mlx4_vf_smi_enabled(dev, slave, port)) {
1010                if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
1011                      smp->method == IB_MGMT_METHOD_GET) || network_view) {
1012                        mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
1013                                 slave, smp->mgmt_class, smp->method,
1014                                 network_view ? "Network" : "Host",
1015                                 be16_to_cpu(smp->attr_id));
1016                        return -EPERM;
1017                }
1018        }
1019
1020        return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
1021                                    vhcr->in_modifier, opcode_modifier,
1022                                    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1023}
1024
1025static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
1026                     struct mlx4_vhcr *vhcr,
1027                     struct mlx4_cmd_mailbox *inbox,
1028                     struct mlx4_cmd_mailbox *outbox,
1029                     struct mlx4_cmd_info *cmd)
1030{
1031        return -EPERM;
1032}
1033
1034int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
1035                     struct mlx4_vhcr *vhcr,
1036                     struct mlx4_cmd_mailbox *inbox,
1037                     struct mlx4_cmd_mailbox *outbox,
1038                     struct mlx4_cmd_info *cmd)
1039{
1040        u64 in_param;
1041        u64 out_param;
1042        int err;
1043
1044        in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1045        out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
1046        if (cmd->encode_slave_id) {
1047                in_param &= 0xffffffffffffff00ll;
1048                in_param |= slave;
1049        }
1050
1051        err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1052                         vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1053                         MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1054
1055        if (cmd->out_is_imm)
1056                vhcr->out_param = out_param;
1057
1058        return err;
1059}
1060
1061static struct mlx4_cmd_info cmd_info[] = {
1062        {
1063                .opcode = MLX4_CMD_QUERY_FW,
1064                .has_inbox = false,
1065                .has_outbox = true,
1066                .out_is_imm = false,
1067                .encode_slave_id = false,
1068                .verify = NULL,
1069                .wrapper = mlx4_QUERY_FW_wrapper
1070        },
1071        {
1072                .opcode = MLX4_CMD_QUERY_HCA,
1073                .has_inbox = false,
1074                .has_outbox = true,
1075                .out_is_imm = false,
1076                .encode_slave_id = false,
1077                .verify = NULL,
1078                .wrapper = NULL
1079        },
1080        {
1081                .opcode = MLX4_CMD_QUERY_DEV_CAP,
1082                .has_inbox = false,
1083                .has_outbox = true,
1084                .out_is_imm = false,
1085                .encode_slave_id = false,
1086                .verify = NULL,
1087                .wrapper = mlx4_QUERY_DEV_CAP_wrapper
1088        },
1089        {
1090                .opcode = MLX4_CMD_QUERY_FUNC_CAP,
1091                .has_inbox = false,
1092                .has_outbox = true,
1093                .out_is_imm = false,
1094                .encode_slave_id = false,
1095                .verify = NULL,
1096                .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1097        },
1098        {
1099                .opcode = MLX4_CMD_QUERY_ADAPTER,
1100                .has_inbox = false,
1101                .has_outbox = true,
1102                .out_is_imm = false,
1103                .encode_slave_id = false,
1104                .verify = NULL,
1105                .wrapper = NULL
1106        },
1107        {
1108                .opcode = MLX4_CMD_INIT_PORT,
1109                .has_inbox = false,
1110                .has_outbox = false,
1111                .out_is_imm = false,
1112                .encode_slave_id = false,
1113                .verify = NULL,
1114                .wrapper = mlx4_INIT_PORT_wrapper
1115        },
1116        {
1117                .opcode = MLX4_CMD_CLOSE_PORT,
1118                .has_inbox = false,
1119                .has_outbox = false,
1120                .out_is_imm  = false,
1121                .encode_slave_id = false,
1122                .verify = NULL,
1123                .wrapper = mlx4_CLOSE_PORT_wrapper
1124        },
1125        {
1126                .opcode = MLX4_CMD_QUERY_PORT,
1127                .has_inbox = false,
1128                .has_outbox = true,
1129                .out_is_imm = false,
1130                .encode_slave_id = false,
1131                .verify = NULL,
1132                .wrapper = mlx4_QUERY_PORT_wrapper
1133        },
1134        {
1135                .opcode = MLX4_CMD_SET_PORT,
1136                .has_inbox = true,
1137                .has_outbox = false,
1138                .out_is_imm = false,
1139                .encode_slave_id = false,
1140                .verify = NULL,
1141                .wrapper = mlx4_SET_PORT_wrapper
1142        },
1143        {
1144                .opcode = MLX4_CMD_MAP_EQ,
1145                .has_inbox = false,
1146                .has_outbox = false,
1147                .out_is_imm = false,
1148                .encode_slave_id = false,
1149                .verify = NULL,
1150                .wrapper = mlx4_MAP_EQ_wrapper
1151        },
1152        {
1153                .opcode = MLX4_CMD_SW2HW_EQ,
1154                .has_inbox = true,
1155                .has_outbox = false,
1156                .out_is_imm = false,
1157                .encode_slave_id = true,
1158                .verify = NULL,
1159                .wrapper = mlx4_SW2HW_EQ_wrapper
1160        },
1161        {
1162                .opcode = MLX4_CMD_HW_HEALTH_CHECK,
1163                .has_inbox = false,
1164                .has_outbox = false,
1165                .out_is_imm = false,
1166                .encode_slave_id = false,
1167                .verify = NULL,
1168                .wrapper = NULL
1169        },
1170        {
1171                .opcode = MLX4_CMD_NOP,
1172                .has_inbox = false,
1173                .has_outbox = false,
1174                .out_is_imm = false,
1175                .encode_slave_id = false,
1176                .verify = NULL,
1177                .wrapper = NULL
1178        },
1179        {
1180                .opcode = MLX4_CMD_CONFIG_DEV,
1181                .has_inbox = false,
1182                .has_outbox = true,
1183                .out_is_imm = false,
1184                .encode_slave_id = false,
1185                .verify = NULL,
1186                .wrapper = mlx4_CONFIG_DEV_wrapper
1187        },
1188        {
1189                .opcode = MLX4_CMD_ALLOC_RES,
1190                .has_inbox = false,
1191                .has_outbox = false,
1192                .out_is_imm = true,
1193                .encode_slave_id = false,
1194                .verify = NULL,
1195                .wrapper = mlx4_ALLOC_RES_wrapper
1196        },
1197        {
1198                .opcode = MLX4_CMD_FREE_RES,
1199                .has_inbox = false,
1200                .has_outbox = false,
1201                .out_is_imm = false,
1202                .encode_slave_id = false,
1203                .verify = NULL,
1204                .wrapper = mlx4_FREE_RES_wrapper
1205        },
1206        {
1207                .opcode = MLX4_CMD_SW2HW_MPT,
1208                .has_inbox = true,
1209                .has_outbox = false,
1210                .out_is_imm = false,
1211                .encode_slave_id = true,
1212                .verify = NULL,
1213                .wrapper = mlx4_SW2HW_MPT_wrapper
1214        },
1215        {
1216                .opcode = MLX4_CMD_QUERY_MPT,
1217                .has_inbox = false,
1218                .has_outbox = true,
1219                .out_is_imm = false,
1220                .encode_slave_id = false,
1221                .verify = NULL,
1222                .wrapper = mlx4_QUERY_MPT_wrapper
1223        },
1224        {
1225                .opcode = MLX4_CMD_HW2SW_MPT,
1226                .has_inbox = false,
1227                .has_outbox = false,
1228                .out_is_imm = false,
1229                .encode_slave_id = false,
1230                .verify = NULL,
1231                .wrapper = mlx4_HW2SW_MPT_wrapper
1232        },
1233        {
1234                .opcode = MLX4_CMD_READ_MTT,
1235                .has_inbox = false,
1236                .has_outbox = true,
1237                .out_is_imm = false,
1238                .encode_slave_id = false,
1239                .verify = NULL,
1240                .wrapper = NULL
1241        },
1242        {
1243                .opcode = MLX4_CMD_WRITE_MTT,
1244                .has_inbox = true,
1245                .has_outbox = false,
1246                .out_is_imm = false,
1247                .encode_slave_id = false,
1248                .verify = NULL,
1249                .wrapper = mlx4_WRITE_MTT_wrapper
1250        },
1251        {
1252                .opcode = MLX4_CMD_SYNC_TPT,
1253                .has_inbox = true,
1254                .has_outbox = false,
1255                .out_is_imm = false,
1256                .encode_slave_id = false,
1257                .verify = NULL,
1258                .wrapper = NULL
1259        },
1260        {
1261                .opcode = MLX4_CMD_HW2SW_EQ,
1262                .has_inbox = false,
1263                .has_outbox = false,
1264                .out_is_imm = false,
1265                .encode_slave_id = true,
1266                .verify = NULL,
1267                .wrapper = mlx4_HW2SW_EQ_wrapper
1268        },
1269        {
1270                .opcode = MLX4_CMD_QUERY_EQ,
1271                .has_inbox = false,
1272                .has_outbox = true,
1273                .out_is_imm = false,
1274                .encode_slave_id = true,
1275                .verify = NULL,
1276                .wrapper = mlx4_QUERY_EQ_wrapper
1277        },
1278        {
1279                .opcode = MLX4_CMD_SW2HW_CQ,
1280                .has_inbox = true,
1281                .has_outbox = false,
1282                .out_is_imm = false,
1283                .encode_slave_id = true,
1284                .verify = NULL,
1285                .wrapper = mlx4_SW2HW_CQ_wrapper
1286        },
1287        {
1288                .opcode = MLX4_CMD_HW2SW_CQ,
1289                .has_inbox = false,
1290                .has_outbox = false,
1291                .out_is_imm = false,
1292                .encode_slave_id = false,
1293                .verify = NULL,
1294                .wrapper = mlx4_HW2SW_CQ_wrapper
1295        },
1296        {
1297                .opcode = MLX4_CMD_QUERY_CQ,
1298                .has_inbox = false,
1299                .has_outbox = true,
1300                .out_is_imm = false,
1301                .encode_slave_id = false,
1302                .verify = NULL,
1303                .wrapper = mlx4_QUERY_CQ_wrapper
1304        },
1305        {
1306                .opcode = MLX4_CMD_MODIFY_CQ,
1307                .has_inbox = true,
1308                .has_outbox = false,
1309                .out_is_imm = true,
1310                .encode_slave_id = false,
1311                .verify = NULL,
1312                .wrapper = mlx4_MODIFY_CQ_wrapper
1313        },
1314        {
1315                .opcode = MLX4_CMD_SW2HW_SRQ,
1316                .has_inbox = true,
1317                .has_outbox = false,
1318                .out_is_imm = false,
1319                .encode_slave_id = true,
1320                .verify = NULL,
1321                .wrapper = mlx4_SW2HW_SRQ_wrapper
1322        },
1323        {
1324                .opcode = MLX4_CMD_HW2SW_SRQ,
1325                .has_inbox = false,
1326                .has_outbox = false,
1327                .out_is_imm = false,
1328                .encode_slave_id = false,
1329                .verify = NULL,
1330                .wrapper = mlx4_HW2SW_SRQ_wrapper
1331        },
1332        {
1333                .opcode = MLX4_CMD_QUERY_SRQ,
1334                .has_inbox = false,
1335                .has_outbox = true,
1336                .out_is_imm = false,
1337                .encode_slave_id = false,
1338                .verify = NULL,
1339                .wrapper = mlx4_QUERY_SRQ_wrapper
1340        },
1341        {
1342                .opcode = MLX4_CMD_ARM_SRQ,
1343                .has_inbox = false,
1344                .has_outbox = false,
1345                .out_is_imm = false,
1346                .encode_slave_id = false,
1347                .verify = NULL,
1348                .wrapper = mlx4_ARM_SRQ_wrapper
1349        },
1350        {
1351                .opcode = MLX4_CMD_RST2INIT_QP,
1352                .has_inbox = true,
1353                .has_outbox = false,
1354                .out_is_imm = false,
1355                .encode_slave_id = true,
1356                .verify = NULL,
1357                .wrapper = mlx4_RST2INIT_QP_wrapper
1358        },
1359        {
1360                .opcode = MLX4_CMD_INIT2INIT_QP,
1361                .has_inbox = true,
1362                .has_outbox = false,
1363                .out_is_imm = false,
1364                .encode_slave_id = false,
1365                .verify = NULL,
1366                .wrapper = mlx4_INIT2INIT_QP_wrapper
1367        },
1368        {
1369                .opcode = MLX4_CMD_INIT2RTR_QP,
1370                .has_inbox = true,
1371                .has_outbox = false,
1372                .out_is_imm = false,
1373                .encode_slave_id = false,
1374                .verify = NULL,
1375                .wrapper = mlx4_INIT2RTR_QP_wrapper
1376        },
1377        {
1378                .opcode = MLX4_CMD_RTR2RTS_QP,
1379                .has_inbox = true,
1380                .has_outbox = false,
1381                .out_is_imm = false,
1382                .encode_slave_id = false,
1383                .verify = NULL,
1384                .wrapper = mlx4_RTR2RTS_QP_wrapper
1385        },
1386        {
1387                .opcode = MLX4_CMD_RTS2RTS_QP,
1388                .has_inbox = true,
1389                .has_outbox = false,
1390                .out_is_imm = false,
1391                .encode_slave_id = false,
1392                .verify = NULL,
1393                .wrapper = mlx4_RTS2RTS_QP_wrapper
1394        },
1395        {
1396                .opcode = MLX4_CMD_SQERR2RTS_QP,
1397                .has_inbox = true,
1398                .has_outbox = false,
1399                .out_is_imm = false,
1400                .encode_slave_id = false,
1401                .verify = NULL,
1402                .wrapper = mlx4_SQERR2RTS_QP_wrapper
1403        },
1404        {
1405                .opcode = MLX4_CMD_2ERR_QP,
1406                .has_inbox = false,
1407                .has_outbox = false,
1408                .out_is_imm = false,
1409                .encode_slave_id = false,
1410                .verify = NULL,
1411                .wrapper = mlx4_GEN_QP_wrapper
1412        },
1413        {
1414                .opcode = MLX4_CMD_RTS2SQD_QP,
1415                .has_inbox = false,
1416                .has_outbox = false,
1417                .out_is_imm = false,
1418                .encode_slave_id = false,
1419                .verify = NULL,
1420                .wrapper = mlx4_GEN_QP_wrapper
1421        },
1422        {
1423                .opcode = MLX4_CMD_SQD2SQD_QP,
1424                .has_inbox = true,
1425                .has_outbox = false,
1426                .out_is_imm = false,
1427                .encode_slave_id = false,
1428                .verify = NULL,
1429                .wrapper = mlx4_SQD2SQD_QP_wrapper
1430        },
1431        {
1432                .opcode = MLX4_CMD_SQD2RTS_QP,
1433                .has_inbox = true,
1434                .has_outbox = false,
1435                .out_is_imm = false,
1436                .encode_slave_id = false,
1437                .verify = NULL,
1438                .wrapper = mlx4_SQD2RTS_QP_wrapper
1439        },
1440        {
1441                .opcode = MLX4_CMD_2RST_QP,
1442                .has_inbox = false,
1443                .has_outbox = false,
1444                .out_is_imm = false,
1445                .encode_slave_id = false,
1446                .verify = NULL,
1447                .wrapper = mlx4_2RST_QP_wrapper
1448        },
1449        {
1450                .opcode = MLX4_CMD_QUERY_QP,
1451                .has_inbox = false,
1452                .has_outbox = true,
1453                .out_is_imm = false,
1454                .encode_slave_id = false,
1455                .verify = NULL,
1456                .wrapper = mlx4_GEN_QP_wrapper
1457        },
1458        {
1459                .opcode = MLX4_CMD_SUSPEND_QP,
1460                .has_inbox = false,
1461                .has_outbox = false,
1462                .out_is_imm = false,
1463                .encode_slave_id = false,
1464                .verify = NULL,
1465                .wrapper = mlx4_GEN_QP_wrapper
1466        },
1467        {
1468                .opcode = MLX4_CMD_UNSUSPEND_QP,
1469                .has_inbox = false,
1470                .has_outbox = false,
1471                .out_is_imm = false,
1472                .encode_slave_id = false,
1473                .verify = NULL,
1474                .wrapper = mlx4_GEN_QP_wrapper
1475        },
1476        {
1477                .opcode = MLX4_CMD_UPDATE_QP,
1478                .has_inbox = true,
1479                .has_outbox = false,
1480                .out_is_imm = false,
1481                .encode_slave_id = false,
1482                .verify = NULL,
1483                .wrapper = mlx4_UPDATE_QP_wrapper
1484        },
1485        {
1486                .opcode = MLX4_CMD_GET_OP_REQ,
1487                .has_inbox = false,
1488                .has_outbox = false,
1489                .out_is_imm = false,
1490                .encode_slave_id = false,
1491                .verify = NULL,
1492                .wrapper = mlx4_CMD_EPERM_wrapper,
1493        },
1494        {
1495                .opcode = MLX4_CMD_ALLOCATE_VPP,
1496                .has_inbox = false,
1497                .has_outbox = true,
1498                .out_is_imm = false,
1499                .encode_slave_id = false,
1500                .verify = NULL,
1501                .wrapper = mlx4_CMD_EPERM_wrapper,
1502        },
1503        {
1504                .opcode = MLX4_CMD_SET_VPORT_QOS,
1505                .has_inbox = false,
1506                .has_outbox = true,
1507                .out_is_imm = false,
1508                .encode_slave_id = false,
1509                .verify = NULL,
1510                .wrapper = mlx4_CMD_EPERM_wrapper,
1511        },
1512        {
1513                .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1514                .has_inbox = false,
1515                .has_outbox = false,
1516                .out_is_imm = false,
1517                .encode_slave_id = false,
1518                .verify = NULL, /* XXX verify: only demux can do this */
1519                .wrapper = NULL
1520        },
1521        {
1522                .opcode = MLX4_CMD_MAD_IFC,
1523                .has_inbox = true,
1524                .has_outbox = true,
1525                .out_is_imm = false,
1526                .encode_slave_id = false,
1527                .verify = NULL,
1528                .wrapper = mlx4_MAD_IFC_wrapper
1529        },
1530        {
1531                .opcode = MLX4_CMD_MAD_DEMUX,
1532                .has_inbox = false,
1533                .has_outbox = false,
1534                .out_is_imm = false,
1535                .encode_slave_id = false,
1536                .verify = NULL,
1537                .wrapper = mlx4_CMD_EPERM_wrapper
1538        },
1539        {
1540                .opcode = MLX4_CMD_QUERY_IF_STAT,
1541                .has_inbox = false,
1542                .has_outbox = true,
1543                .out_is_imm = false,
1544                .encode_slave_id = false,
1545                .verify = NULL,
1546                .wrapper = mlx4_QUERY_IF_STAT_wrapper
1547        },
1548        {
1549                .opcode = MLX4_CMD_ACCESS_REG,
1550                .has_inbox = true,
1551                .has_outbox = true,
1552                .out_is_imm = false,
1553                .encode_slave_id = false,
1554                .verify = NULL,
1555                .wrapper = mlx4_ACCESS_REG_wrapper,
1556        },
1557        {
1558                .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
1559                .has_inbox = false,
1560                .has_outbox = false,
1561                .out_is_imm = false,
1562                .encode_slave_id = false,
1563                .verify = NULL,
1564                .wrapper = mlx4_CMD_EPERM_wrapper,
1565        },
1566        /* Native multicast commands are not available for guests */
1567        {
1568                .opcode = MLX4_CMD_QP_ATTACH,
1569                .has_inbox = true,
1570                .has_outbox = false,
1571                .out_is_imm = false,
1572                .encode_slave_id = false,
1573                .verify = NULL,
1574                .wrapper = mlx4_QP_ATTACH_wrapper
1575        },
1576        {
1577                .opcode = MLX4_CMD_PROMISC,
1578                .has_inbox = false,
1579                .has_outbox = false,
1580                .out_is_imm = false,
1581                .encode_slave_id = false,
1582                .verify = NULL,
1583                .wrapper = mlx4_PROMISC_wrapper
1584        },
1585        /* Ethernet specific commands */
1586        {
1587                .opcode = MLX4_CMD_SET_VLAN_FLTR,
1588                .has_inbox = true,
1589                .has_outbox = false,
1590                .out_is_imm = false,
1591                .encode_slave_id = false,
1592                .verify = NULL,
1593                .wrapper = mlx4_SET_VLAN_FLTR_wrapper
1594        },
1595        {
1596                .opcode = MLX4_CMD_SET_MCAST_FLTR,
1597                .has_inbox = false,
1598                .has_outbox = false,
1599                .out_is_imm = false,
1600                .encode_slave_id = false,
1601                .verify = NULL,
1602                .wrapper = mlx4_SET_MCAST_FLTR_wrapper
1603        },
1604        {
1605                .opcode = MLX4_CMD_DUMP_ETH_STATS,
1606                .has_inbox = false,
1607                .has_outbox = true,
1608                .out_is_imm = false,
1609                .encode_slave_id = false,
1610                .verify = NULL,
1611                .wrapper = mlx4_DUMP_ETH_STATS_wrapper
1612        },
1613        {
1614                .opcode = MLX4_CMD_INFORM_FLR_DONE,
1615                .has_inbox = false,
1616                .has_outbox = false,
1617                .out_is_imm = false,
1618                .encode_slave_id = false,
1619                .verify = NULL,
1620                .wrapper = NULL
1621        },
1622        /* flow steering commands */
1623        {
1624                .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1625                .has_inbox = true,
1626                .has_outbox = false,
1627                .out_is_imm = true,
1628                .encode_slave_id = false,
1629                .verify = NULL,
1630                .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1631        },
1632        {
1633                .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1634                .has_inbox = false,
1635                .has_outbox = false,
1636                .out_is_imm = false,
1637                .encode_slave_id = false,
1638                .verify = NULL,
1639                .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1640        },
1641        {
1642                .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1643                .has_inbox = false,
1644                .has_outbox = false,
1645                .out_is_imm = false,
1646                .encode_slave_id = false,
1647                .verify = NULL,
1648                .wrapper = mlx4_CMD_EPERM_wrapper
1649        },
1650        {
1651                .opcode = MLX4_CMD_VIRT_PORT_MAP,
1652                .has_inbox = false,
1653                .has_outbox = false,
1654                .out_is_imm = false,
1655                .encode_slave_id = false,
1656                .verify = NULL,
1657                .wrapper = mlx4_CMD_EPERM_wrapper
1658        },
1659};
1660
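/*
 * Process one command posted by a slave through its virtual HCR.  When
 * @in_vhcr is NULL the vHCR is first DMA'd in from the slave's memory.
 * The opcode is looked up in cmd_info[], the inbox (if any) is copied in,
 * the optional verify() hook is applied, and the command is executed
 * either through its wrapper or passed directly to the firmware.  The
 * outbox and completion status are then written back to the slave, and a
 * command-completion EQE is generated if the slave requested one (e_bit).
 */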
1661static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1662                                    struct mlx4_vhcr_cmd *in_vhcr)
1663{
1664        struct mlx4_priv *priv = mlx4_priv(dev);
1665        struct mlx4_cmd_info *cmd = NULL;
1666        struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1667        struct mlx4_vhcr *vhcr;
1668        struct mlx4_cmd_mailbox *inbox = NULL;
1669        struct mlx4_cmd_mailbox *outbox = NULL;
1670        u64 in_param;
1671        u64 out_param;
1672        int ret = 0;
1673        int i;
1674        int err = 0;
1675
1676        /* Create sw representation of Virtual HCR */
1677        vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1678        if (!vhcr)
1679                return -ENOMEM;
1680
1681        /* DMA in the vHCR */
1682        if (!in_vhcr) {
1683                ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1684                                      priv->mfunc.master.slave_state[slave].vhcr_dma,
1685                                      ALIGN(sizeof(struct mlx4_vhcr_cmd),
1686                                            MLX4_ACCESS_MEM_ALIGN), 1);
1687                if (ret) {
1688                        if (!(dev->persist->state &
1689                            MLX4_DEVICE_STATE_INTERNAL_ERROR))
1690                                mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1691                                         __func__, ret);
1692                        kfree(vhcr);
1693                        return ret;
1694                }
1695        }
1696
1697        /* Fill SW VHCR fields */
1698        vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1699        vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1700        vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1701        vhcr->token = be16_to_cpu(vhcr_cmd->token);
1702        vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1703        vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1704        vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1705
1706        /* Lookup command */
1707        for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1708                if (vhcr->op == cmd_info[i].opcode) {
1709                        cmd = &cmd_info[i];
1710                        break;
1711                }
1712        }
1713        if (!cmd) {
1714                mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1715                         vhcr->op, slave);
1716                vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1717                goto out_status;
1718        }
1719
1720        /* Read inbox */
1721        if (cmd->has_inbox) {
1722                vhcr->in_param &= INBOX_MASK;
1723                inbox = mlx4_alloc_cmd_mailbox(dev);
1724                if (IS_ERR(inbox)) {
1725                        vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1726                        inbox = NULL;
1727                        goto out_status;
1728                }
1729
1730                ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1731                                      vhcr->in_param,
1732                                      MLX4_MAILBOX_SIZE, 1);
1733                if (ret) {
1734                        if (!(dev->persist->state &
1735                            MLX4_DEVICE_STATE_INTERNAL_ERROR))
1736                                mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1737                                         __func__, cmd->opcode);
1738                        vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1739                        goto out_status;
1740                }
1741        }
1742
1743        /* Apply permission and bound checks if applicable */
1744        if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1745                mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1746                          vhcr->op, slave, vhcr->in_modifier);
1747                vhcr_cmd->status = CMD_STAT_BAD_OP;
1748                goto out_status;
1749        }
1750
1751        /* Allocate outbox */
1752        if (cmd->has_outbox) {
1753                outbox = mlx4_alloc_cmd_mailbox(dev);
1754                if (IS_ERR(outbox)) {
1755                        vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1756                        outbox = NULL;
1757                        goto out_status;
1758                }
1759        }
1760
1761        /* Execute the command! */
1762        if (cmd->wrapper) {
1763                err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1764                                   cmd);
1765                if (cmd->out_is_imm)
1766                        vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1767        } else {
1768                in_param = cmd->has_inbox ? (u64) inbox->dma :
1769                        vhcr->in_param;
1770                out_param = cmd->has_outbox ? (u64) outbox->dma :
1771                        vhcr->out_param;
1772                err = __mlx4_cmd(dev, in_param, &out_param,
1773                                 cmd->out_is_imm, vhcr->in_modifier,
1774                                 vhcr->op_modifier, vhcr->op,
1775                                 MLX4_CMD_TIME_CLASS_A,
1776                                 MLX4_CMD_NATIVE);
1777
1778                if (cmd->out_is_imm) {
1779                        vhcr->out_param = out_param;
1780                        vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1781                }
1782        }
1783
1784        if (err) {
1785                if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
1786                        if (vhcr->op == MLX4_CMD_ALLOC_RES &&
1787                            (vhcr->in_modifier & 0xff) == RES_COUNTER &&
1788                            err == -EDQUOT)
1789                                mlx4_dbg(dev,
1790                                         "Unable to allocate counter for slave %d (%d)\n",
1791                                         slave, err);
1792                        else
1793                                mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1794                                          vhcr->op, slave, vhcr->errno, err);
1795                }
1796                vhcr_cmd->status = mlx4_errno_to_status(err);
1797                goto out_status;
1798        }
1799
1800
1801        /* Write outbox if command completed successfully */
1802        if (cmd->has_outbox && !vhcr_cmd->status) {
1803                ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1804                                      vhcr->out_param,
1805                                      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1806                if (ret) {
1807                        /* If we failed to write back the outbox after the
1808                         * command was successfully executed, we must fail this
1809                         * slave, as it is now in an undefined state */
1810                        if (!(dev->persist->state &
1811                            MLX4_DEVICE_STATE_INTERNAL_ERROR))
1812                                mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1813                        goto out;
1814                }
1815        }
1816
1817out_status:
1818        /* DMA back vhcr result */
1819        if (!in_vhcr) {
1820                ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1821                                      priv->mfunc.master.slave_state[slave].vhcr_dma,
1822                                      ALIGN(sizeof(struct mlx4_vhcr),
1823                                            MLX4_ACCESS_MEM_ALIGN),
1824                                      MLX4_CMD_WRAPPED);
1825                if (ret)
1826                        mlx4_err(dev, "%s:Failed writing vhcr result\n",
1827                                 __func__);
1828                else if (vhcr->e_bit &&
1829                         mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1830                                mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1831                                          slave);
1832        }
1833
1834out:
1835        kfree(vhcr);
1836        mlx4_free_cmd_mailbox(dev, inbox);
1837        mlx4_free_cmd_mailbox(dev, outbox);
1838        return ret;
1839}
1840
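/*
 * Immediately apply a changed VLAN/QoS/link-state admin configuration to
 * an active VF: register the new VLAN (if any), update the operational
 * vport state and queue a work item that re-applies the settings to the
 * VF's QPs via UPDATE_QP.  Returns 0 on success (or when nothing
 * changed), -1 when UPDATE_QP is not available (only the link state is
 * applied), or a negative errno on failure.
 */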
1841static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1842                                            int slave, int port)
1843{
1844        struct mlx4_vport_oper_state *vp_oper;
1845        struct mlx4_vport_state *vp_admin;
1846        struct mlx4_vf_immed_vlan_work *work;
1847        struct mlx4_dev *dev = &(priv->dev);
1848        int err;
1849        int admin_vlan_ix = NO_INDX;
1850
1851        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1852        vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1853
1854        if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1855            vp_oper->state.default_qos == vp_admin->default_qos &&
1856            vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
1857            vp_oper->state.link_state == vp_admin->link_state &&
1858            vp_oper->state.qos_vport == vp_admin->qos_vport)
1859                return 0;
1860
1861        if (!(priv->mfunc.master.slave_state[slave].active &&
1862              dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
1863                /* even if the UPDATE_QP command isn't supported, we still want
1864                 * to set this VF link according to the admin directive
1865                 */
1866                vp_oper->state.link_state = vp_admin->link_state;
1867                return -1;
1868        }
1869
1870        mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1871                 slave, port);
1872        mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1873                 vp_admin->default_vlan, vp_admin->default_qos,
1874                 vp_admin->link_state);
1875
1876        work = kzalloc(sizeof(*work), GFP_KERNEL);
1877        if (!work)
1878                return -ENOMEM;
1879
1880        if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1881                if (MLX4_VGT != vp_admin->default_vlan) {
1882                        err = __mlx4_register_vlan(&priv->dev, port,
1883                                                   vp_admin->default_vlan,
1884                                                   &admin_vlan_ix);
1885                        if (err) {
1886                                kfree(work);
1887                                mlx4_warn(&priv->dev,
1888                                          "No vlan resources slave %d, port %d\n",
1889                                          slave, port);
1890                                return err;
1891                        }
1892                } else {
1893                        admin_vlan_ix = NO_INDX;
1894                }
1895                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1896                mlx4_dbg(&priv->dev,
1897                         "alloc vlan %d idx  %d slave %d port %d\n",
1898                         (int)(vp_admin->default_vlan),
1899                         admin_vlan_ix, slave, port);
1900        }
1901
1902        /* save original vlan ix and vlan id */
1903        work->orig_vlan_id = vp_oper->state.default_vlan;
1904        work->orig_vlan_ix = vp_oper->vlan_idx;
1905
1906        /* handle new qos */
1907        if (vp_oper->state.default_qos != vp_admin->default_qos)
1908                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1909
1910        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1911                vp_oper->vlan_idx = admin_vlan_ix;
1912
1913        vp_oper->state.default_vlan = vp_admin->default_vlan;
1914        vp_oper->state.default_qos = vp_admin->default_qos;
1915        vp_oper->state.vlan_proto = vp_admin->vlan_proto;
1916        vp_oper->state.link_state = vp_admin->link_state;
1917        vp_oper->state.qos_vport = vp_admin->qos_vport;
1918
1919        if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1920                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
1921
1922        /* iterate over QPs owned by this slave, using UPDATE_QP */
1923        work->port = port;
1924        work->slave = slave;
1925        work->qos = vp_oper->state.default_qos;
1926        work->qos_vport = vp_oper->state.qos_vport;
1927        work->vlan_id = vp_oper->state.default_vlan;
1928        work->vlan_ix = vp_oper->vlan_idx;
1929        work->vlan_proto = vp_oper->state.vlan_proto;
1930        work->priv = priv;
1931        INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1932        queue_work(priv->mfunc.master.comm_wq, &work->work);
1933
1934        return 0;
1935}
1936
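/* Reset the port's QoS priority bitmap, leaving only the default priority
 * enabled when the PF initializes.
 */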
1937static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
1938{
1939        struct mlx4_qos_manager *port_qos_ctl;
1940        struct mlx4_priv *priv = mlx4_priv(dev);
1941
1942        port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
1943        bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
1944
1945        /* Enable only default prio at PF init routine */
1946        set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
1947}
1948
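/*
 * Query how many virtual port profiles (VPPs) the firmware has available
 * on @port, split them evenly across the enabled QoS priorities, and
 * record how many QoS-capable VFs the port can serve.
 */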
1949static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
1950{
1951        int i;
1952        int err;
1953        int num_vfs;
1954        u16 available_vpp;
1955        u8 vpp_param[MLX4_NUM_UP];
1956        struct mlx4_qos_manager *port_qos;
1957        struct mlx4_priv *priv = mlx4_priv(dev);
1958
1959        err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1960        if (err) {
1961                mlx4_info(dev, "Failed query available VPPs\n");
1962                return;
1963        }
1964
1965        port_qos = &priv->mfunc.master.qos_ctl[port];
1966        num_vfs = (available_vpp /
1967                   bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
1968
1969        for (i = 0; i < MLX4_NUM_UP; i++) {
1970                if (test_bit(i, port_qos->priority_bm))
1971                        vpp_param[i] = num_vfs;
1972        }
1973
1974        err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
1975        if (err) {
1976                mlx4_info(dev, "Failed allocating VPPs\n");
1977                return;
1978        }
1979
1980        /* Query actual allocated VPP, just to make sure */
1981        err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1982        if (err) {
1983                mlx4_info(dev, "Failed query available VPPs\n");
1984                return;
1985        }
1986
1987        port_qos->num_of_qos_vfs = num_vfs;
1988        mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp);
1989
1990        for (i = 0; i < MLX4_NUM_UP; i++)
1991                mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
1992                         vpp_param[i]);
1993}
1994
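/*
 * Copy the per-port admin vport configuration (VLAN, QoS, MAC, spoof
 * checking, link state) into the operational state when a slave starts,
 * registering the default VLAN and, when spoof checking is enabled, the
 * MAC with the hardware.  Returns 0 or a negative errno.
 */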
1995static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1996{
1997        int port, err;
1998        struct mlx4_vport_state *vp_admin;
1999        struct mlx4_vport_oper_state *vp_oper;
2000        struct mlx4_slave_state *slave_state =
2001                &priv->mfunc.master.slave_state[slave];
2002        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2003                        &priv->dev, slave);
2004        int min_port = find_first_bit(actv_ports.ports,
2005                                      priv->dev.caps.num_ports) + 1;
2006        int max_port = min_port - 1 +
2007                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2008
2009        for (port = min_port; port <= max_port; port++) {
2010                if (!test_bit(port - 1, actv_ports.ports))
2011                        continue;
2012                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2013                        priv->mfunc.master.vf_admin[slave].enable_smi[port];
2014                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2015                vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2016                if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
2017                    slave_state->vst_qinq_supported) {
2018                        vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
2019                        vp_oper->state.default_vlan = vp_admin->default_vlan;
2020                        vp_oper->state.default_qos  = vp_admin->default_qos;
2021                }
2022                vp_oper->state.link_state = vp_admin->link_state;
2023                vp_oper->state.mac        = vp_admin->mac;
2024                vp_oper->state.spoofchk   = vp_admin->spoofchk;
2025                vp_oper->state.tx_rate    = vp_admin->tx_rate;
2026                vp_oper->state.qos_vport  = vp_admin->qos_vport;
2027                vp_oper->state.guid       = vp_admin->guid;
2028
2029                if (MLX4_VGT != vp_admin->default_vlan) {
2030                        err = __mlx4_register_vlan(&priv->dev, port,
2031                                                   vp_admin->default_vlan, &(vp_oper->vlan_idx));
2032                        if (err) {
2033                                vp_oper->vlan_idx = NO_INDX;
2034                                vp_oper->state.default_vlan = MLX4_VGT;
2035                                vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
2036                                mlx4_warn(&priv->dev,
2037                                          "No vlan resources slave %d, port %d\n",
2038                                          slave, port);
2039                                return err;
2040                        }
2041                        mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
2042                                 (int)(vp_oper->state.default_vlan),
2043                                 vp_oper->vlan_idx, slave, port);
2044                }
2045                if (vp_admin->spoofchk) {
2046                        vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
2047                                                               port,
2048                                                               vp_admin->mac);
2049                        if (0 > vp_oper->mac_idx) {
2050                                err = vp_oper->mac_idx;
2051                                vp_oper->mac_idx = NO_INDX;
2052                                mlx4_warn(&priv->dev,
2053                                          "No mac resources slave %d, port %d\n",
2054                                          slave, port);
2055                                return err;
2056                        }
2057                        mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
2058                                 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
2059                }
2060        }
2061        return 0;
2062}
2063
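/*
 * Undo mlx4_master_activate_admin_state(): release any VLAN and MAC
 * registered on behalf of the slave and mark its SMI access as disabled.
 */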
2064static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
2065{
2066        int port;
2067        struct mlx4_vport_oper_state *vp_oper;
2068        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2069                        &priv->dev, slave);
2070        int min_port = find_first_bit(actv_ports.ports,
2071                                      priv->dev.caps.num_ports) + 1;
2072        int max_port = min_port - 1 +
2073                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2074
2075
2076        for (port = min_port; port <= max_port; port++) {
2077                if (!test_bit(port - 1, actv_ports.ports))
2078                        continue;
2079                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2080                        MLX4_VF_SMI_DISABLED;
2081                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2082                if (NO_INDX != vp_oper->vlan_idx) {
2083                        __mlx4_unregister_vlan(&priv->dev,
2084                                               port, vp_oper->state.default_vlan);
2085                        vp_oper->vlan_idx = NO_INDX;
2086                }
2087                if (NO_INDX != vp_oper->mac_idx) {
2088                        __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
2089                        vp_oper->mac_idx = NO_INDX;
2090                }
2091        }
2092        return;
2093}
2094
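/*
 * Handle a single command received from a slave over the comm channel:
 * track the slave's toggle bit, collect the vHCR DMA address piece by
 * piece (VHCR0..VHCR_EN), process posted commands (VHCR_POST) and handle
 * slave resets.  On a protocol violation the slave is reset and, if
 * needed, an internal-error event is raised so it resets itself.
 */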
2095static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
2096                               u16 param, u8 toggle)
2097{
2098        struct mlx4_priv *priv = mlx4_priv(dev);
2099        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2100        u32 reply;
2101        u8 is_going_down = 0;
2102        int i;
2103        unsigned long flags;
2104
2105        slave_state[slave].comm_toggle ^= 1;
2106        reply = (u32) slave_state[slave].comm_toggle << 31;
2107        if (toggle != slave_state[slave].comm_toggle) {
2108                mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
2109                          toggle, slave);
2110                goto reset_slave;
2111        }
2112        if (cmd == MLX4_COMM_CMD_RESET) {
2113                mlx4_warn(dev, "Received reset from slave:%d\n", slave);
2114                slave_state[slave].active = false;
2115                slave_state[slave].old_vlan_api = false;
2116                slave_state[slave].vst_qinq_supported = false;
2117                mlx4_master_deactivate_admin_state(priv, slave);
2118                for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
2119                        slave_state[slave].event_eq[i].eqn = -1;
2120                        slave_state[slave].event_eq[i].token = 0;
2121                }
2122                /* Check if we are in the middle of the FLR process;
2123                 * if so, return "retry" status to the slave. */
2124                if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
2125                        goto inform_slave_state;
2126
2127                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
2128
2129                /* write the version in the event field */
2130                reply |= mlx4_comm_get_version();
2131
2132                goto reset_slave;
2133        }
2134        /* Command from a slave in the middle of FLR */
2135        if (cmd != MLX4_COMM_CMD_RESET &&
2136            MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
2137                mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
2138                          slave, cmd);
2139                return;
2140        }
2141
2142        switch (cmd) {
2143        case MLX4_COMM_CMD_VHCR0:
2144                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
2145                        goto reset_slave;
2146                slave_state[slave].vhcr_dma = ((u64) param) << 48;
2147                priv->mfunc.master.slave_state[slave].cookie = 0;
2148                break;
2149        case MLX4_COMM_CMD_VHCR1:
2150                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
2151                        goto reset_slave;
2152                slave_state[slave].vhcr_dma |= ((u64) param) << 32;
2153                break;
2154        case MLX4_COMM_CMD_VHCR2:
2155                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
2156                        goto reset_slave;
2157                slave_state[slave].vhcr_dma |= ((u64) param) << 16;
2158                break;
2159        case MLX4_COMM_CMD_VHCR_EN:
2160                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
2161                        goto reset_slave;
2162                slave_state[slave].vhcr_dma |= param;
2163                if (mlx4_master_activate_admin_state(priv, slave))
2164                        goto reset_slave;
2165                slave_state[slave].active = true;
2166                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
2167                break;
2168        case MLX4_COMM_CMD_VHCR_POST:
2169                if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
2170                    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2171                        mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2172                                  slave, cmd, slave_state[slave].last_cmd);
2173                        goto reset_slave;
2174                }
2175
2176                mutex_lock(&priv->cmd.slave_cmd_mutex);
2177                if (mlx4_master_process_vhcr(dev, slave, NULL)) {
2178                        mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
2179                                 slave);
2180                        mutex_unlock(&priv->cmd.slave_cmd_mutex);
2181                        goto reset_slave;
2182                }
2183                mutex_unlock(&priv->cmd.slave_cmd_mutex);
2184                break;
2185        default:
2186                mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
2187                goto reset_slave;
2188        }
2189        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2190        if (!slave_state[slave].is_slave_going_down)
2191                slave_state[slave].last_cmd = cmd;
2192        else
2193                is_going_down = 1;
2194        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2195        if (is_going_down) {
2196                mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
2197                          cmd, slave);
2198                return;
2199        }
2200        __raw_writel((__force u32) cpu_to_be32(reply),
2201                     &priv->mfunc.comm[slave].slave_read);
2202
2203        return;
2204
2205reset_slave:
2206        /* cleanup any slave resources */
2207        if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2208                mlx4_delete_all_resources_for_slave(dev, slave);
2209
2210        if (cmd != MLX4_COMM_CMD_RESET) {
2211                mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2212                          slave, cmd);
2213                /* Turn on internal error, letting the slave reset itself immediately;
2214                 * otherwise it might take until the command timeout expires.
2215                 */
2216                reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2217        }
2218
2219        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2220        if (!slave_state[slave].is_slave_going_down)
2221                slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2222        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2223        /* With the slave in the middle of FLR, no need to clean resources again. */
2224inform_slave_state:
2225        memset(&slave_state[slave].event_eq, 0,
2226               sizeof(struct mlx4_slave_event_eq_info));
2227        __raw_writel((__force u32) cpu_to_be32(reply),
2228                     &priv->mfunc.comm[slave].slave_read);
2229        wmb();
2230}
2231
2232/* Master command processing: serve pending slave commands and re-arm the comm channel */
2233void mlx4_master_comm_channel(struct work_struct *work)
2234{
2235        struct mlx4_mfunc_master_ctx *master =
2236                container_of(work,
2237                             struct mlx4_mfunc_master_ctx,
2238                             comm_work);
2239        struct mlx4_mfunc *mfunc =
2240                container_of(master, struct mlx4_mfunc, master);
2241        struct mlx4_priv *priv =
2242                container_of(mfunc, struct mlx4_priv, mfunc);
2243        struct mlx4_dev *dev = &priv->dev;
2244        u32 lbit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
2245        u32 nmbr_bits;
2246        u32 comm_cmd;
2247        int i, slave;
2248        int toggle;
2249        bool first = true;
2250        int served = 0;
2251        int reported = 0;
2252        u32 slt;
2253
2254        for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++)
2255                lbit_vec[i] = be32_to_cpu(master->comm_arm_bit_vector[i]);
2256        nmbr_bits = dev->persist->num_vfs + 1;
2257        if (++master->next_slave >= nmbr_bits)
2258                master->next_slave = 0;
2259        slave = master->next_slave;
2260        while (true) {
2261                slave = find_next_bit((const unsigned long *)&lbit_vec, nmbr_bits, slave);
2262                if  (!first && slave >= master->next_slave)
2263                        break;
2264                if (slave == nmbr_bits) {
2265                        if (!first)
2266                                break;
2267                        first = false;
2268                        slave = 0;
2269                        continue;
2270                }
2271                ++reported;
2272                comm_cmd = swab32(readl(&mfunc->comm[slave].slave_write));
2273                slt = swab32(readl(&mfunc->comm[slave].slave_read)) >> 31;
2274                toggle = comm_cmd >> 31;
2275                if (toggle != slt) {
2276                        if (master->slave_state[slave].comm_toggle
2277                            != slt) {
2278                                pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
2279                                        slave, slt,
2280                                        master->slave_state[slave].comm_toggle);
2281                                master->slave_state[slave].comm_toggle =
2282                                        slt;
2283                        }
2284                        mlx4_master_do_cmd(dev, slave,
2285                                           comm_cmd >> 16 & 0xff,
2286                                           comm_cmd & 0xffff, toggle);
2287                        ++served;
2288                }
2289                slave++;
2290        }
2291
2292        if (reported && reported != served)
2293                mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
2294                          reported, served);
2295
2296        if (mlx4_ARM_COMM_CHANNEL(dev))
2297                mlx4_warn(dev, "Failed to arm comm channel events\n");
2298}
2299
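/*
 * Slave side: wait until the comm-channel read and write toggle bits
 * match so that this slave starts out in sync with the master,
 * recovering the channel if a previous user left it in a bad state.
 */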
2300static int sync_toggles(struct mlx4_dev *dev)
2301{
2302        struct mlx4_priv *priv = mlx4_priv(dev);
2303        u32 wr_toggle;
2304        u32 rd_toggle;
2305        unsigned long end;
2306
2307        wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
2308        if (wr_toggle == 0xffffffff)
2309                end = jiffies + msecs_to_jiffies(30000);
2310        else
2311                end = jiffies + msecs_to_jiffies(5000);
2312
2313        while (time_before(jiffies, end)) {
2314                rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2315                if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2316                        /* PCI might be offline */
2317
2318                        /* If device removal has been requested,
2319                         * do not continue retrying.
2320                         */
2321                        if (dev->persist->interface_state &
2322                            MLX4_INTERFACE_STATE_NOWAIT) {
2323                                mlx4_warn(dev,
2324                                          "communication channel is offline\n");
2325                                return -EIO;
2326                        }
2327
2328                        msleep(100);
2329                        wr_toggle =
2330                                swab32(readl(&priv->mfunc.comm->slave_write));
2331                        continue;
2332                }
2333
2334                if (rd_toggle >> 31 == wr_toggle >> 31) {
2335                        priv->cmd.comm_toggle = rd_toggle >> 31;
2336                        return 0;
2337                }
2338
2339                cond_resched();
2340        }
2341
2342        /*
2343         * we could reach here if for example the previous VM using this
2344         * function misbehaved and left the channel with unsynced state. We
2345         * should fix this here and give this VM a chance to use a properly
2346         * synced channel
2347         */
2348        mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
2349        __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2350        __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2351        priv->cmd.comm_toggle = 0;
2352
2353        return 0;
2354}
2355
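/*
 * Set up the multi-function (SR-IOV) command infrastructure: map the comm
 * channel and, on the master, allocate per-slave state and vport
 * admin/oper structures, program default QoS/VPP settings, and create the
 * comm workqueue and resource tracker.  Slaves only synchronize their
 * comm-channel toggle with the master.
 */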
2356int mlx4_multi_func_init(struct mlx4_dev *dev)
2357{
2358        struct mlx4_priv *priv = mlx4_priv(dev);
2359        struct mlx4_slave_state *s_state;
2360        int i, j, err, port;
2361
2362        if (mlx4_is_master(dev))
2363                priv->mfunc.comm =
2364                ioremap(pci_resource_start(dev->persist->pdev,
2365                                           priv->fw.comm_bar) +
2366                        priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2367        else
2368                priv->mfunc.comm =
2369                ioremap(pci_resource_start(dev->persist->pdev, 2) +
2370                        MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2371        if (!priv->mfunc.comm) {
2372                mlx4_err(dev, "Couldn't map communication vector\n");
2373                goto err_vhcr;
2374        }
2375
2376        if (mlx4_is_master(dev)) {
2377                struct mlx4_vf_oper_state *vf_oper;
2378                struct mlx4_vf_admin_state *vf_admin;
2379
2380                priv->mfunc.master.slave_state =
2381                        kcalloc(dev->num_slaves,
2382                                sizeof(struct mlx4_slave_state),
2383                                GFP_KERNEL);
2384                if (!priv->mfunc.master.slave_state)
2385                        goto err_comm;
2386
2387                priv->mfunc.master.vf_admin =
2388                        kcalloc(dev->num_slaves,
2389                                sizeof(struct mlx4_vf_admin_state),
2390                                GFP_KERNEL);
2391                if (!priv->mfunc.master.vf_admin)
2392                        goto err_comm_admin;
2393
2394                priv->mfunc.master.vf_oper =
2395                        kcalloc(dev->num_slaves,
2396                                sizeof(struct mlx4_vf_oper_state),
2397                                GFP_KERNEL);
2398                if (!priv->mfunc.master.vf_oper)
2399                        goto err_comm_oper;
2400
2401                priv->mfunc.master.next_slave = 0;
2402
2403                for (i = 0; i < dev->num_slaves; ++i) {
2404                        vf_admin = &priv->mfunc.master.vf_admin[i];
2405                        vf_oper = &priv->mfunc.master.vf_oper[i];
2406                        s_state = &priv->mfunc.master.slave_state[i];
2407                        s_state->last_cmd = MLX4_COMM_CMD_RESET;
2408                        s_state->vst_qinq_supported = false;
2409                        mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2410                        for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2411                                s_state->event_eq[j].eqn = -1;
2412                        __raw_writel((__force u32) 0,
2413                                     &priv->mfunc.comm[i].slave_write);
2414                        __raw_writel((__force u32) 0,
2415                                     &priv->mfunc.comm[i].slave_read);
2416                        for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2417                                struct mlx4_vport_state *admin_vport;
2418                                struct mlx4_vport_state *oper_vport;
2419
2420                                s_state->vlan_filter[port] =
2421                                        kzalloc(sizeof(struct mlx4_vlan_fltr),
2422                                                GFP_KERNEL);
2423                                if (!s_state->vlan_filter[port]) {
2424                                        if (--port)
2425                                                kfree(s_state->vlan_filter[port]);
2426                                        goto err_slaves;
2427                                }
2428
2429                                admin_vport = &vf_admin->vport[port];
2430                                oper_vport = &vf_oper->vport[port].state;
2431                                INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2432                                admin_vport->default_vlan = MLX4_VGT;
2433                                oper_vport->default_vlan = MLX4_VGT;
2434                                admin_vport->qos_vport =
2435                                                MLX4_VPP_DEFAULT_VPORT;
2436                                oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
2437                                admin_vport->vlan_proto = htons(ETH_P_8021Q);
2438                                oper_vport->vlan_proto = htons(ETH_P_8021Q);
2439                                vf_oper->vport[port].vlan_idx = NO_INDX;
2440                                vf_oper->vport[port].mac_idx = NO_INDX;
2441                                mlx4_set_random_admin_guid(dev, i, port);
2442                        }
2443                        spin_lock_init(&s_state->lock);
2444                }
2445
2446                if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
2447                        for (port = 1; port <= dev->caps.num_ports; port++) {
2448                                if (mlx4_is_eth(dev, port)) {
2449                                        mlx4_set_default_port_qos(dev, port);
2450                                        mlx4_allocate_port_vpps(dev, port);
2451                                }
2452                        }
2453                }
2454
2455                memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
2456                priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2457                INIT_WORK(&priv->mfunc.master.comm_work,
2458                          mlx4_master_comm_channel);
2459                INIT_WORK(&priv->mfunc.master.slave_event_work,
2460                          mlx4_gen_slave_eqe);
2461                INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2462                          mlx4_master_handle_slave_flr);
2463                spin_lock_init(&priv->mfunc.master.slave_state_lock);
2464                spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2465                priv->mfunc.master.comm_wq =
2466                        create_singlethread_workqueue("mlx4_comm");
2467                if (!priv->mfunc.master.comm_wq)
2468                        goto err_slaves;
2469
2470                if (mlx4_init_resource_tracker(dev))
2471                        goto err_thread;
2472
2473        } else {
2474                err = sync_toggles(dev);
2475                if (err) {
2476                        mlx4_err(dev, "Couldn't sync toggles\n");
2477                        goto err_comm;
2478                }
2479        }
2480        return 0;
2481
2482err_thread:
2483        flush_workqueue(priv->mfunc.master.comm_wq);
2484        destroy_workqueue(priv->mfunc.master.comm_wq);
2485err_slaves:
2486        while (i--) {
2487                for (port = 1; port <= MLX4_MAX_PORTS; port++)
2488                        kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2489        }
2490        kfree(priv->mfunc.master.vf_oper);
2491err_comm_oper:
2492        kfree(priv->mfunc.master.vf_admin);
2493err_comm_admin:
2494        kfree(priv->mfunc.master.slave_state);
2495err_comm:
2496        iounmap(priv->mfunc.comm);
2497        priv->mfunc.comm = NULL;
2498err_vhcr:
2499        dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2500                          priv->mfunc.vhcr,
2501                          priv->mfunc.vhcr_dma);
2502        priv->mfunc.vhcr = NULL;
2503        return -ENOMEM;
2504}
2505
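/*
 * One-time initialization of the command interface: command locks and
 * semaphores, the HCR mapping (non-slave functions only), the vHCR page
 * (multi-function only) and the mailbox DMA pool.  Pieces that are
 * already initialized are skipped, so the function may be called again
 * after a partial init.
 */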
2506int mlx4_cmd_init(struct mlx4_dev *dev)
2507{
2508        struct mlx4_priv *priv = mlx4_priv(dev);
2509        int flags = 0;
2510
2511        if (!priv->cmd.initialized) {
2512                init_rwsem(&priv->cmd.switch_sem);
2513                mutex_init(&priv->cmd.slave_cmd_mutex);
2514                sema_init(&priv->cmd.poll_sem, 1);
2515                priv->cmd.use_events = 0;
2516                priv->cmd.toggle     = 1;
2517                priv->cmd.initialized = 1;
2518                flags |= MLX4_CMD_CLEANUP_STRUCT;
2519        }
2520
2521        if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2522                priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2523                                        0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2524                if (!priv->cmd.hcr) {
2525                        mlx4_err(dev, "Couldn't map command register\n");
2526                        goto err;
2527                }
2528                flags |= MLX4_CMD_CLEANUP_HCR;
2529        }
2530
2531        if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2532                priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2533                                                      PAGE_SIZE,
2534                                                      &priv->mfunc.vhcr_dma,
2535                                                      GFP_KERNEL);
2536                if (!priv->mfunc.vhcr)
2537                        goto err;
2538
2539                flags |= MLX4_CMD_CLEANUP_VHCR;
2540        }
2541
2542        if (!priv->cmd.pool) {
2543                priv->cmd.pool = dma_pool_create("mlx4_cmd",
2544                                                 &dev->persist->pdev->dev,
2545                                                 MLX4_MAILBOX_SIZE,
2546                                                 MLX4_MAILBOX_SIZE, 0);
2547                if (!priv->cmd.pool)
2548                        goto err;
2549
2550                flags |= MLX4_CMD_CLEANUP_POOL;
2551        }
2552
2553        return 0;
2554
2555err:
2556        mlx4_cmd_cleanup(dev, flags);
2557        return -ENOMEM;
2558}
2559
2560void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2561{
2562        struct mlx4_priv *priv = mlx4_priv(dev);
2563        int slave;
2564        u32 slave_read;
2565
2566        /* If the comm channel has not yet been initialized,
2567         * skip reporting the internal error event to all
2568         * the communication channels.
2569         */
2570        if (!priv->mfunc.comm)
2571                return;
2572
2573        /* Report an internal error event to all
2574         * communication channels.
2575         */
2576        for (slave = 0; slave < dev->num_slaves; slave++) {
2577                slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2578                slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2579                __raw_writel((__force u32)cpu_to_be32(slave_read),
2580                             &priv->mfunc.comm[slave].slave_read);
2581        }
2582}
2583
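/*
 * Tear down what mlx4_multi_func_init() set up: on the master, flush and
 * destroy the comm workqueue and free the per-slave state, then unmap the
 * comm channel.
 */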
2584void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2585{
2586        struct mlx4_priv *priv = mlx4_priv(dev);
2587        int i, port;
2588
2589        if (mlx4_is_master(dev)) {
2590                flush_workqueue(priv->mfunc.master.comm_wq);
2591                destroy_workqueue(priv->mfunc.master.comm_wq);
2592                for (i = 0; i < dev->num_slaves; i++) {
2593                        for (port = 1; port <= MLX4_MAX_PORTS; port++)
2594                                kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2595                }
2596                kfree(priv->mfunc.master.slave_state);
2597                kfree(priv->mfunc.master.vf_admin);
2598                kfree(priv->mfunc.master.vf_oper);
2599                dev->num_slaves = 0;
2600        }
2601
2602        iounmap(priv->mfunc.comm);
2603        priv->mfunc.comm = NULL;
2604}
2605
2606void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2607{
2608        struct mlx4_priv *priv = mlx4_priv(dev);
2609
2610        if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2611                dma_pool_destroy(priv->cmd.pool);
2612                priv->cmd.pool = NULL;
2613        }
2614
2615        if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2616            (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
2617                iounmap(priv->cmd.hcr);
2618                priv->cmd.hcr = NULL;
2619        }
2620        if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2621            (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2622                dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2623                                  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2624                priv->mfunc.vhcr = NULL;
2625        }
2626        if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2627                priv->cmd.initialized = 0;
2628}
2629
2630/*
2631 * Switch to using events to issue FW commands (can only be called
2632 * after event queue for command events has been initialized).
2633 */
2634int mlx4_cmd_use_events(struct mlx4_dev *dev)
2635{
2636        struct mlx4_priv *priv = mlx4_priv(dev);
2637        int i;
2638        int err = 0;
2639
2640        priv->cmd.context = kmalloc_array(priv->cmd.max_cmds,
2641                                          sizeof(struct mlx4_cmd_context),
2642                                          GFP_KERNEL);
2643        if (!priv->cmd.context)
2644                return -ENOMEM;
2645
2646        if (mlx4_is_mfunc(dev))
2647                mutex_lock(&priv->cmd.slave_cmd_mutex);
2648        down_write(&priv->cmd.switch_sem);
2649        for (i = 0; i < priv->cmd.max_cmds; ++i) {
2650                priv->cmd.context[i].token = i;
2651                priv->cmd.context[i].next  = i + 1;
2652                /* To support fatal error flow, initialize all
2653                 * cmd contexts to allow simulating completions
2654                 * with complete() at any time.
2655                 */
2656                init_completion(&priv->cmd.context[i].done);
2657        }
2658
2659        priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2660        priv->cmd.free_head = 0;
2661
2662        sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2663
2664        for (priv->cmd.token_mask = 1;
2665             priv->cmd.token_mask < priv->cmd.max_cmds;
2666             priv->cmd.token_mask <<= 1)
2667                ; /* nothing */
2668        --priv->cmd.token_mask;
2669
2670        down(&priv->cmd.poll_sem);
2671        priv->cmd.use_events = 1;
2672        up_write(&priv->cmd.switch_sem);
2673        if (mlx4_is_mfunc(dev))
2674                mutex_unlock(&priv->cmd.slave_cmd_mutex);
2675
2676        return err;
2677}
2678
2679/*
2680 * Switch back to polling (used when shutting down the device)
2681 */
2682void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2683{
2684        struct mlx4_priv *priv = mlx4_priv(dev);
2685        int i;
2686
2687        if (mlx4_is_mfunc(dev))
2688                mutex_lock(&priv->cmd.slave_cmd_mutex);
2689        down_write(&priv->cmd.switch_sem);
2690        priv->cmd.use_events = 0;
2691
2692        for (i = 0; i < priv->cmd.max_cmds; ++i)
2693                down(&priv->cmd.event_sem);
2694
2695        kfree(priv->cmd.context);
2696        priv->cmd.context = NULL;
2697
2698        up(&priv->cmd.poll_sem);
2699        up_write(&priv->cmd.switch_sem);
2700        if (mlx4_is_mfunc(dev))
2701                mutex_unlock(&priv->cmd.slave_cmd_mutex);
2702}
2703
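    /* Allocate a command mailbox whose buffer comes zeroed from the command
     * DMA pool.  Returns ERR_PTR(-ENOMEM) on failure, so callers must check
     * with IS_ERR() and release the mailbox with mlx4_free_cmd_mailbox();
     * mlx4_get_counter_stats() below shows the typical pattern.
     */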
2704struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2705{
2706        struct mlx4_cmd_mailbox *mailbox;
2707
2708        mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
2709        if (!mailbox)
2710                return ERR_PTR(-ENOMEM);
2711
2712        mailbox->buf = dma_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2713                                       &mailbox->dma);
2714        if (!mailbox->buf) {
2715                kfree(mailbox);
2716                return ERR_PTR(-ENOMEM);
2717        }
2718
2719        return mailbox;
2720}
2721EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2722
2723void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2724                           struct mlx4_cmd_mailbox *mailbox)
2725{
2726        if (!mailbox)
2727                return;
2728
2729        dma_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2730        kfree(mailbox);
2731}
2732EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2733
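    /* Comm-channel version word: interface revision in the high byte,
     * channel version in the low byte.
     */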
2734u32 mlx4_comm_get_version(void)
2735{
2736        return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2737}
2738
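    /* Map a VF number to its slave index.  Slave 0 is the PF, so VF n maps
     * to slave n + 1; out-of-range VF numbers return -EINVAL.
     */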
2739static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2740{
2741        if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2742                mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2743                         vf, dev->persist->num_vfs);
2744                return -EINVAL;
2745        }
2746
2747        return vf+1;
2748}
2749
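    /* Inverse of mlx4_get_slave_indx(): map a slave index back to its VF
     * number, or return -EINVAL for slave 0 (the PF) or an out-of-range
     * slave index.
     */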
2750int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2751{
2752        if (slave < 1 || slave > dev->persist->num_vfs) {
2753                mlx4_err(dev,
2754                         "Bad slave number:%d (number of activated slaves: %lu)\n",
2755                         slave, dev->num_slaves);
2756                return -EINVAL;
2757        }
2758        return slave - 1;
2759}
2760
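    /* Used on the fatal-error path: force-complete every event-mode command
     * context with CMD_STAT_INTERNAL_ERR so that any waiters are woken.
     */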
2761void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2762{
2763        struct mlx4_priv *priv = mlx4_priv(dev);
2764        struct mlx4_cmd_context *context;
2765        int i;
2766
2767        spin_lock(&priv->cmd.context_lock);
2768        if (priv->cmd.context) {
2769                for (i = 0; i < priv->cmd.max_cmds; ++i) {
2770                        context = &priv->cmd.context[i];
2771                        context->fw_status = CMD_STAT_INTERNAL_ERR;
2772                        context->result    =
2773                                mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2774                        complete(&context->done);
2775                }
2776        }
2777        spin_unlock(&priv->cmd.context_lock);
2778}
2779
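    /* Return the bitmap of physical ports assigned to @slave.  The PF
     * (slave 0) gets all ports; a VF gets the range described by its
     * dev_vfs[] entry, and an invalid slave gets an empty bitmap.
     */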
2780struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2781{
2782        struct mlx4_active_ports actv_ports;
2783        int vf;
2784
2785        bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2786
2787        if (slave == 0) {
2788                bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2789                return actv_ports;
2790        }
2791
2792        vf = mlx4_get_vf_indx(dev, slave);
2793        if (vf < 0)
2794                return actv_ports;
2795
2796        bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2797                   min((int)dev->dev_vfs[vf].n_ports,
2798                       dev->caps.num_ports));
2799
2800        return actv_ports;
2801}
2802EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2803
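    /* Convert a slave-relative port number (1..N for a slave with N active
     * ports) into the corresponding physical port number, or -EINVAL if the
     * port is out of range for that slave.
     */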
2804int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2805{
2806        unsigned n;
2807        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2808        unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2809
2810        if (port <= 0 || port > m)
2811                return -EINVAL;
2812
2813        n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2814        if (port <= n)
2815                port = n + 1;
2816
2817        return port;
2818}
2819EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2820
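    /* The reverse of mlx4_slave_convert_port(): map a physical port number
     * to the slave-relative port number, or -1 if the port is not assigned
     * to this slave.
     */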
2821int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2822{
2823        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2824        if (test_bit(port - 1, actv_ports.ports))
2825                return port -
2826                        find_first_bit(actv_ports.ports, dev->caps.num_ports);
2827
2828        return -1;
2829}
2830EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2831
2832struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2833                                                   int port)
2834{
2835        unsigned i;
2836        struct mlx4_slaves_pport slaves_pport;
2837
2838        bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2839
2840        if (port <= 0 || port > dev->caps.num_ports)
2841                return slaves_pport;
2842
2843        for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2844                struct mlx4_active_ports actv_ports =
2845                        mlx4_get_active_ports(dev, i);
2846                if (test_bit(port - 1, actv_ports.ports))
2847                        set_bit(i, slaves_pport.slaves);
2848        }
2849
2850        return slaves_pport;
2851}
2852EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2853
2854struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2855                struct mlx4_dev *dev,
2856                const struct mlx4_active_ports *crit_ports)
2857{
2858        unsigned i;
2859        struct mlx4_slaves_pport slaves_pport;
2860
2861        bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2862
2863        for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2864                struct mlx4_active_ports actv_ports =
2865                        mlx4_get_active_ports(dev, i);
2866                if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2867                                 dev->caps.num_ports))
2868                        set_bit(i, slaves_pport.slaves);
2869        }
2870
2871        return slaves_pport;
2872}
2873EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2874
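    /* Clamp @port to the nearest port that is actually assigned to @slave. */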
2875static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2876{
2877        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2878        int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2879                        + 1;
2880        int max_port = min_port +
2881                bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2882
2883        if (port < min_port)
2884                port = min_port;
2885        else if (port >= max_port)
2886                port = max_port - 1;
2887
2888        return port;
2889}
2890
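    /* Program VPP rate limiting for @slave on @port: start from Vport 0's
     * default per-priority values, then enable max_avg_bw = @max_tx_rate on
     * each priority in the port's priority bitmap (rate 0 leaves limiting
     * disabled).
     */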
2891static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
2892                              int max_tx_rate)
2893{
2894        int i;
2895        int err;
2896        struct mlx4_qos_manager *port_qos;
2897        struct mlx4_dev *dev = &priv->dev;
2898        struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
2899
2900        port_qos = &priv->mfunc.master.qos_ctl[port];
2901        memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
2902
2903        if (slave > port_qos->num_of_qos_vfs) {
2904                mlx4_info(dev, "No available VPP resources for this VF\n");
2905                return -EINVAL;
2906        }
2907
2908        /* We need to query the default QoS values from Vport 0 */
2909        err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
2910        if (err) {
2911                mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
2912                return err;
2913        }
2914
2915        for (i = 0; i < MLX4_NUM_UP; i++) {
2916                if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
2917                        vpp_qos[i].max_avg_bw = max_tx_rate;
2918                        vpp_qos[i].enable = 1;
2919                } else {
2920                        /* If the user supplied tx_rate == 0, no rate limit
2921                         * configuration is required, so we leave max_avg_bw
2922                         * at the value queried from Vport 0.
2923                         */
2924                        vpp_qos[i].enable = 0;
2925                }
2926        }
2927
2928        err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
2929        if (err) {
2930                mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
2931                return err;
2932        }
2933
2934        return 0;
2935}
2936
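    /* True when per-VF QoS can actually be enforced: the device supports
     * QOS_VPP and the VF is in VST mode with a default QoS priority that is
     * present in the port's priority bitmap.
     */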
2937static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
2938                                        struct mlx4_vport_state *vf_admin)
2939{
2940        struct mlx4_qos_manager *info;
2941        struct mlx4_priv *priv = mlx4_priv(dev);
2942
2943        if (!mlx4_is_master(dev) ||
2944            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2945                return false;
2946
2947        info = &priv->mfunc.master.qos_ctl[port];
2948
2949        if (vf_admin->default_vlan != MLX4_VGT &&
2950            test_bit(vf_admin->default_qos, info->priority_bm))
2951                return true;
2952
2953        return false;
2954}
2955
2956static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
2957                                       struct mlx4_vport_state *vf_admin,
2958                                       int vlan, int qos)
2959{
2960        struct mlx4_vport_state dummy_admin = {0};
2961
2962        if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
2963            !vf_admin->tx_rate)
2964                return true;
2965
2966        dummy_admin.default_qos = qos;
2967        dummy_admin.default_vlan = vlan;
2968
2969        /* The VF wants to move to another VST state which is valid with
2970         * the current rate limit: either a different default vlan in VST
2971         * or another supported QoS priority. Otherwise we don't allow the
2972         * change while the TX rate is still configured.
2973         */
2974        if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
2975                return true;
2976
2977        mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
2978                  (vlan == MLX4_VGT) ? "VGT" : "VST");
2979
2980        if (vlan != MLX4_VGT)
2981                mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
2982
2983        mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
2984
2985        return false;
2986}
2987
2988int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
2989{
2990        struct mlx4_priv *priv = mlx4_priv(dev);
2991        struct mlx4_vport_state *s_info;
2992        int slave;
2993
2994        if (!mlx4_is_master(dev))
2995                return -EPROTONOSUPPORT;
2996
2997        if (is_multicast_ether_addr(mac))
2998                return -EINVAL;
2999
3000        slave = mlx4_get_slave_indx(dev, vf);
3001        if (slave < 0)
3002                return -EINVAL;
3003
3004        port = mlx4_slaves_closest_port(dev, slave, port);
3005        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3006
3007        if (s_info->spoofchk && is_zero_ether_addr(mac)) {
3008                mlx4_info(dev, "MAC invalidation is not allowed when spoofchk is on\n");
3009                return -EPERM;
3010        }
3011
3012        s_info->mac = mlx4_mac_to_u64(mac);
3013        mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
3014                  vf, port, s_info->mac);
3015        return 0;
3016}
3017EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
3018
3019
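    /* Set the administrative VLAN/QoS for a VF.  vlan == 0 with qos == 0
     * selects VGT; anything else is VST.  802.1ad (QinQ) requires both
     * device and slave support, and moving into QinQ mode takes effect only
     * after a VF restart.
     */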
3020int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
3021                     __be16 proto)
3022{
3023        struct mlx4_priv *priv = mlx4_priv(dev);
3024        struct mlx4_vport_state *vf_admin;
3025        struct mlx4_slave_state *slave_state;
3026        struct mlx4_vport_oper_state *vf_oper;
3027        int slave;
3028
3029        if ((!mlx4_is_master(dev)) ||
3030            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
3031                return -EPROTONOSUPPORT;
3032
3033        if ((vlan > 4095) || (qos > 7))
3034                return -EINVAL;
3035
3036        if (proto == htons(ETH_P_8021AD) &&
3037            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
3038                return -EPROTONOSUPPORT;
3039
3040        if (proto != htons(ETH_P_8021Q) &&
3041            proto != htons(ETH_P_8021AD))
3042                return -EINVAL;
3043
3044        if ((proto == htons(ETH_P_8021AD)) &&
3045            ((vlan == 0) || (vlan == MLX4_VGT)))
3046                return -EINVAL;
3047
3048        slave = mlx4_get_slave_indx(dev, vf);
3049        if (slave < 0)
3050                return -EINVAL;
3051
3052        slave_state = &priv->mfunc.master.slave_state[slave];
3053        if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
3054            (!slave_state->vst_qinq_supported)) {
3055                mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
3056                return -EPROTONOSUPPORT;
3057        }
3058        port = mlx4_slaves_closest_port(dev, slave, port);
3059        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3060        vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3061
3062        if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
3063                return -EPERM;
3064
3065        if ((0 == vlan) && (0 == qos))
3066                vf_admin->default_vlan = MLX4_VGT;
3067        else
3068                vf_admin->default_vlan = vlan;
3069        vf_admin->default_qos = qos;
3070        vf_admin->vlan_proto = proto;
3071
3072        /* If a rate was configured prior to VST, we saved it in
3073         * vf_admin->tx_rate; now, if the priority is supported, we enforce the QoS
3074         */
3075        if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
3076            vf_admin->tx_rate)
3077                vf_admin->qos_vport = slave;
3078
3079        /* Try to activate the new vf state without a restart; this is
3080         * not supported while moving to VST QinQ mode.
3081         */
3082        if ((proto == htons(ETH_P_8021AD) &&
3083             vf_oper->state.vlan_proto != proto) ||
3084            mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3085                mlx4_info(dev,
3086                          "updating vf %d port %d config will take effect on next VF restart\n",
3087                          vf, port);
3088        return 0;
3089}
3090EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
3091
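    /* Set a maximum TX rate for a VF (a minimum rate is not supported).
     * The rate is stored in the VF's admin state and is only enforced
     * immediately when the VF is in VST mode with a supported QoS priority.
     */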
3092int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
3093                     int max_tx_rate)
3094{
3095        int err;
3096        int slave;
3097        struct mlx4_vport_state *vf_admin;
3098        struct mlx4_priv *priv = mlx4_priv(dev);
3099
3100        if (!mlx4_is_master(dev) ||
3101            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
3102                return -EPROTONOSUPPORT;
3103
3104        if (min_tx_rate) {
3105                mlx4_info(dev, "Minimum BW share not supported\n");
3106                return -EPROTONOSUPPORT;
3107        }
3108
3109        slave = mlx4_get_slave_indx(dev, vf);
3110        if (slave < 0)
3111                return -EINVAL;
3112
3113        port = mlx4_slaves_closest_port(dev, slave, port);
3114        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3115
3116        err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
3117        if (err) {
3118                mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
3119                          max_tx_rate);
3120                return err;
3121        }
3122
3123        vf_admin->tx_rate = max_tx_rate;
3124        /* If the VF is not in a supported mode (VST with a supported prio),
3125         * we do not change the vport configuration for its QPs, but save
3126         * the rate so it will be enforced the next time the VF moves to a
3127         * supported mode.
3128         */
3129        if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
3130                mlx4_info(dev,
3131                          "rate set for VF %d when not in valid state\n", vf);
3132
3133                if (vf_admin->default_vlan != MLX4_VGT)
3134                        mlx4_info(dev, "VST priority not supported by QoS\n");
3135                else
3136                        mlx4_info(dev, "VF in VGT mode (needed VST)\n");
3137
3138                mlx4_info(dev,
3139                          "rate %d take affect when VF moves to valid state\n",
3140                          max_tx_rate);
3141                return 0;
3142        }
3143
3144        /* If the user sets rate 0, assign the default vport to its QPs */
3145        vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
3146
3147        if (priv->mfunc.master.slave_state[slave].active &&
3148            dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
3149                mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
3150
3151        return 0;
3152}
3153EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
3154
3155/* mlx4_get_slave_default_vlan -
3156 * Return true if the slave is in VST mode (i.e. has a default vlan).
3157 * If so, also return the vlan and qos through the pointers (if not NULL).
3158 */
3159bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
3160                                 u16 *vlan, u8 *qos)
3161{
3162        struct mlx4_vport_oper_state *vp_oper;
3163        struct mlx4_priv *priv;
3164
3165        priv = mlx4_priv(dev);
3166        port = mlx4_slaves_closest_port(dev, slave, port);
3167        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3168
3169        if (MLX4_VGT != vp_oper->state.default_vlan) {
3170                if (vlan)
3171                        *vlan = vp_oper->state.default_vlan;
3172                if (qos)
3173                        *qos = vp_oper->state.default_qos;
3174                return true;
3175        }
3176        return false;
3177}
3178EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
3179
3180int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
3181{
3182        struct mlx4_priv *priv = mlx4_priv(dev);
3183        struct mlx4_vport_state *s_info;
3184        int slave;
3185        u8 mac[ETH_ALEN];
3186
3187        if ((!mlx4_is_master(dev)) ||
3188            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
3189                return -EPROTONOSUPPORT;
3190
3191        slave = mlx4_get_slave_indx(dev, vf);
3192        if (slave < 0)
3193                return -EINVAL;
3194
3195        port = mlx4_slaves_closest_port(dev, slave, port);
3196        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3197
3198        mlx4_u64_to_mac(mac, s_info->mac);
3199        if (setting && !is_valid_ether_addr(mac)) {
3200                mlx4_info(dev, "Illegal MAC with spoofchk\n");
3201                return -EPERM;
3202        }
3203
3204        s_info->spoofchk = setting;
3205
3206        return 0;
3207}
3208EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
3209
3210int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
3211{
3212        struct mlx4_priv *priv = mlx4_priv(dev);
3213        struct mlx4_vport_state *s_info;
3214        int slave;
3215
3216        if (!mlx4_is_master(dev))
3217                return -EPROTONOSUPPORT;
3218
3219        slave = mlx4_get_slave_indx(dev, vf);
3220        if (slave < 0)
3221                return -EINVAL;
3222
3223        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3224        ivf->vf = vf;
3225
3226        /* need to convert this open-coded MAC extraction to a helper func */
3227        ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
3228        ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
3229        ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
3230        ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
3231        ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
3232        ivf->mac[5] = ((s_info->mac)  & 0xff);
3233
3234        ivf->vlan               = s_info->default_vlan;
3235        ivf->qos                = s_info->default_qos;
3236        ivf->vlan_proto         = s_info->vlan_proto;
3237
3238        if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
3239                ivf->max_tx_rate = s_info->tx_rate;
3240        else
3241                ivf->max_tx_rate = 0;
3242
3243        ivf->min_tx_rate        = 0;
3244        ivf->spoofchk           = s_info->spoofchk;
3245        ivf->linkstate          = s_info->link_state;
3246
3247        return 0;
3248}
3249EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
3250
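    /* Set the administrative link state of a VF (auto, enabled or disabled)
     * and generate the matching port-state-change event towards the VF.
     */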
3251int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
3252{
3253        struct mlx4_priv *priv = mlx4_priv(dev);
3254        struct mlx4_vport_state *s_info;
3255        int slave;
3256        u8 link_stat_event;
3257
3258        slave = mlx4_get_slave_indx(dev, vf);
3259        if (slave < 0)
3260                return -EINVAL;
3261
3262        port = mlx4_slaves_closest_port(dev, slave, port);
3263        switch (link_state) {
3264        case IFLA_VF_LINK_STATE_AUTO:
3265                /* get current link state */
3266                if (!priv->sense.do_sense_port[port])
3267                        link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
3268                else
3269                        link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
3270                break;
3271
3272        case IFLA_VF_LINK_STATE_ENABLE:
3273                link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
3274                break;
3275
3276        case IFLA_VF_LINK_STATE_DISABLE:
3277                link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
3278                break;
3279
3280        default:
3281                mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
3282                          link_state, slave, port);
3283                return -EINVAL;
3284        }
3285        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3286        s_info->link_state = link_state;
3287
3288        /* send event */
3289        mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
3290
3291        if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3292                mlx4_dbg(dev,
3293                         "updating vf %d port %d no link state HW enforcement\n",
3294                         vf, port);
3295        return 0;
3296}
3297EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
3298
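    /* Read a hardware counter through QUERY_IF_STAT (optionally resetting
     * it) and accumulate the basic rx/tx frame and byte counts into
     * @counter_stats.
     */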
3299int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
3300                           struct mlx4_counter *counter_stats, int reset)
3301{
3302        struct mlx4_cmd_mailbox *mailbox = NULL;
3303        struct mlx4_counter *tmp_counter;
3304        int err;
3305        u32 if_stat_in_mod;
3306
3307        if (!counter_stats)
3308                return -EINVAL;
3309
3310        if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
3311                return 0;
3312
3313        mailbox = mlx4_alloc_cmd_mailbox(dev);
3314        if (IS_ERR(mailbox))
3315                return PTR_ERR(mailbox);
3316
3317        memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
3318        if_stat_in_mod = counter_index;
3319        if (reset)
3320                if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
3321        err = mlx4_cmd_box(dev, 0, mailbox->dma,
3322                           if_stat_in_mod, 0,
3323                           MLX4_CMD_QUERY_IF_STAT,
3324                           MLX4_CMD_TIME_CLASS_C,
3325                           MLX4_CMD_NATIVE);
3326        if (err) {
3327                mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
3328                         __func__, counter_index);
3329                goto if_stat_out;
3330        }
3331        tmp_counter = (struct mlx4_counter *)mailbox->buf;
3332        counter_stats->counter_mode = tmp_counter->counter_mode;
3333        if (counter_stats->counter_mode == 0) {
3334                counter_stats->rx_frames =
3335                        cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
3336                                    be64_to_cpu(tmp_counter->rx_frames));
3337                counter_stats->tx_frames =
3338                        cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
3339                                    be64_to_cpu(tmp_counter->tx_frames));
3340                counter_stats->rx_bytes =
3341                        cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
3342                                    be64_to_cpu(tmp_counter->rx_bytes));
3343                counter_stats->tx_bytes =
3344                        cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
3345                                    be64_to_cpu(tmp_counter->tx_bytes));
3346        }
3347
3348if_stat_out:
3349        mlx4_free_cmd_mailbox(dev, mailbox);
3350
3351        return err;
3352}
3353EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
3354
3355int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
3356                      struct ifla_vf_stats *vf_stats)
3357{
3358        struct mlx4_counter tmp_vf_stats;
3359        int slave;
3360        int err = 0;
3361
3362        if (!vf_stats)
3363                return -EINVAL;
3364
3365        if (!mlx4_is_master(dev))
3366                return -EPROTONOSUPPORT;
3367
3368        slave = mlx4_get_slave_indx(dev, vf_idx);
3369        if (slave < 0)
3370                return -EINVAL;
3371
3372        port = mlx4_slaves_closest_port(dev, slave, port);
3373        err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
3374        if (!err && tmp_vf_stats.counter_mode == 0) {
3375                vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
3376                vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
3377                vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
3378                vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
3379        }
3380
3381        return err;
3382}
3383EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
3384
3385int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
3386{
3387        struct mlx4_priv *priv = mlx4_priv(dev);
3388
3389        if (slave < 1 || slave >= dev->num_slaves ||
3390            port < 1 || port > MLX4_MAX_PORTS)
3391                return 0;
3392
3393        return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
3394                MLX4_VF_SMI_ENABLED;
3395}
3396EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
3397
3398int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
3399{
3400        struct mlx4_priv *priv = mlx4_priv(dev);
3401
3402        if (slave == mlx4_master_func_num(dev))
3403                return 1;
3404
3405        if (slave < 1 || slave >= dev->num_slaves ||
3406            port < 1 || port > MLX4_MAX_PORTS)
3407                return 0;
3408
3409        return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
3410                MLX4_VF_SMI_ENABLED;
3411}
3412EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
3413
3414int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
3415                                 int enabled)
3416{
3417        struct mlx4_priv *priv = mlx4_priv(dev);
3418        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
3419                        &priv->dev, slave);
3420        int min_port = find_first_bit(actv_ports.ports,
3421                                      priv->dev.caps.num_ports) + 1;
3422        int max_port = min_port - 1 +
3423                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
3424
3425        if (slave == mlx4_master_func_num(dev))
3426                return 0;
3427
3428        if (slave < 1 || slave >= dev->num_slaves ||
3429            port < 1 || port > MLX4_MAX_PORTS ||
3430            enabled < 0 || enabled > 1)
3431                return -EINVAL;
3432
3433        if (min_port == max_port && dev->caps.num_ports > 1) {
3434                mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
3435                return -EPROTONOSUPPORT;
3436        }
3437
3438        priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
3439        return 0;
3440}
3441EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
3442