linux/drivers/net/ethernet/mellanox/mlx4/cmd.c
   1/*
   2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
   3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
   4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#include <linux/sched.h>
  36#include <linux/slab.h>
  37#include <linux/export.h>
  38#include <linux/pci.h>
  39#include <linux/errno.h>
  40
  41#include <linux/mlx4/cmd.h>
  42#include <linux/mlx4/device.h>
  43#include <linux/semaphore.h>
  44#include <rdma/ib_smi.h>
  45#include <linux/delay.h>
  46
  47#include <asm/io.h>
  48
  49#include "mlx4.h"
  50#include "fw.h"
  51#include "fw_qos.h"
  52#include "mlx4_stats.h"
  53
  54#define CMD_POLL_TOKEN 0xffff
  55#define INBOX_MASK      0xffffffffffffff00ULL
  56
  57#define CMD_CHAN_VER 1
  58#define CMD_CHAN_IF_REV 1
  59
  60enum {
  61        /* command completed successfully: */
  62        CMD_STAT_OK             = 0x00,
  63        /* Internal error (such as a bus error) occurred while processing command: */
  64        CMD_STAT_INTERNAL_ERR   = 0x01,
  65        /* Operation/command not supported or opcode modifier not supported: */
  66        CMD_STAT_BAD_OP         = 0x02,
  67        /* Parameter not supported or parameter out of range: */
  68        CMD_STAT_BAD_PARAM      = 0x03,
  69        /* System not enabled or bad system state: */
  70        CMD_STAT_BAD_SYS_STATE  = 0x04,
   71        /* Attempt to access reserved or unallocated resource: */
  72        CMD_STAT_BAD_RESOURCE   = 0x05,
  73        /* Requested resource is currently executing a command, or is otherwise busy: */
  74        CMD_STAT_RESOURCE_BUSY  = 0x06,
  75        /* Required capability exceeds device limits: */
  76        CMD_STAT_EXCEED_LIM     = 0x08,
  77        /* Resource is not in the appropriate state or ownership: */
  78        CMD_STAT_BAD_RES_STATE  = 0x09,
  79        /* Index out of range: */
  80        CMD_STAT_BAD_INDEX      = 0x0a,
  81        /* FW image corrupted: */
  82        CMD_STAT_BAD_NVMEM      = 0x0b,
  83        /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
  84        CMD_STAT_ICM_ERROR      = 0x0c,
  85        /* Attempt to modify a QP/EE which is not in the presumed state: */
  86        CMD_STAT_BAD_QP_STATE   = 0x10,
  87        /* Bad segment parameters (Address/Size): */
  88        CMD_STAT_BAD_SEG_PARAM  = 0x20,
   89        /* Memory Region has Memory Windows bound to it: */
  90        CMD_STAT_REG_BOUND      = 0x21,
  91        /* HCA local attached memory not present: */
  92        CMD_STAT_LAM_NOT_PRE    = 0x22,
  93        /* Bad management packet (silently discarded): */
  94        CMD_STAT_BAD_PKT        = 0x30,
  95        /* More outstanding CQEs in CQ than new CQ size: */
  96        CMD_STAT_BAD_SIZE       = 0x40,
  97        /* Multi Function device support required: */
  98        CMD_STAT_MULTI_FUNC_REQ = 0x50,
  99};
 100
 101enum {
 102        HCR_IN_PARAM_OFFSET     = 0x00,
 103        HCR_IN_MODIFIER_OFFSET  = 0x08,
 104        HCR_OUT_PARAM_OFFSET    = 0x0c,
 105        HCR_TOKEN_OFFSET        = 0x14,
 106        HCR_STATUS_OFFSET       = 0x18,
 107
 108        HCR_OPMOD_SHIFT         = 12,
 109        HCR_T_BIT               = 21,
 110        HCR_E_BIT               = 22,
 111        HCR_GO_BIT              = 23
 112};
 113
 114enum {
 115        GO_BIT_TIMEOUT_MSECS    = 10000
 116};
 117
 118enum mlx4_vlan_transition {
 119        MLX4_VLAN_TRANSITION_VST_VST = 0,
 120        MLX4_VLAN_TRANSITION_VST_VGT = 1,
 121        MLX4_VLAN_TRANSITION_VGT_VST = 2,
 122        MLX4_VLAN_TRANSITION_VGT_VGT = 3,
 123};
 124
 125
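/* Tracks one outstanding event-mode command: the issuer sleeps on 'done',
 * the completion handler fills in 'result', 'out_param' and 'fw_status',
 * 'token' matches a completion to its context and 'next' links free contexts.
 */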
 126struct mlx4_cmd_context {
 127        struct completion       done;
 128        int                     result;
 129        int                     next;
 130        u64                     out_param;
 131        u16                     token;
 132        u8                      fw_status;
 133};
 134
 135static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
 136                                    struct mlx4_vhcr_cmd *in_vhcr);
 137
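/* Translate a firmware command status code into a negative errno value. */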
 138static int mlx4_status_to_errno(u8 status)
 139{
 140        static const int trans_table[] = {
 141                [CMD_STAT_INTERNAL_ERR]   = -EIO,
 142                [CMD_STAT_BAD_OP]         = -EPERM,
 143                [CMD_STAT_BAD_PARAM]      = -EINVAL,
 144                [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
 145                [CMD_STAT_BAD_RESOURCE]   = -EBADF,
 146                [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
 147                [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
 148                [CMD_STAT_BAD_RES_STATE]  = -EBADF,
 149                [CMD_STAT_BAD_INDEX]      = -EBADF,
 150                [CMD_STAT_BAD_NVMEM]      = -EFAULT,
 151                [CMD_STAT_ICM_ERROR]      = -ENFILE,
 152                [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
 153                [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
 154                [CMD_STAT_REG_BOUND]      = -EBUSY,
 155                [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
 156                [CMD_STAT_BAD_PKT]        = -EINVAL,
 157                [CMD_STAT_BAD_SIZE]       = -ENOMEM,
 158                [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
 159        };
 160
 161        if (status >= ARRAY_SIZE(trans_table) ||
 162            (status != CMD_STAT_OK && trans_table[status] == 0))
 163                return -EIO;
 164
 165        return trans_table[status];
 166}
 167
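/* Map a Linux errno back to the closest firmware command status code. */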
 168static u8 mlx4_errno_to_status(int errno)
 169{
 170        switch (errno) {
 171        case -EPERM:
 172                return CMD_STAT_BAD_OP;
 173        case -EINVAL:
 174                return CMD_STAT_BAD_PARAM;
 175        case -ENXIO:
 176                return CMD_STAT_BAD_SYS_STATE;
 177        case -EBUSY:
 178                return CMD_STAT_RESOURCE_BUSY;
 179        case -ENOMEM:
 180                return CMD_STAT_EXCEED_LIM;
 181        case -ENFILE:
 182                return CMD_STAT_ICM_ERROR;
 183        default:
 184                return CMD_STAT_INTERNAL_ERR;
 185        }
 186}
 187
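/* Return value to use when the device is in internal-error state: teardown
 * commands report success so that resource cleanup can proceed, while
 * everything else fails with the internal-error code (-EIO).
 */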
 188static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
 189                                       u8 op_modifier)
 190{
 191        switch (op) {
 192        case MLX4_CMD_UNMAP_ICM:
 193        case MLX4_CMD_UNMAP_ICM_AUX:
 194        case MLX4_CMD_UNMAP_FA:
 195        case MLX4_CMD_2RST_QP:
 196        case MLX4_CMD_HW2SW_EQ:
 197        case MLX4_CMD_HW2SW_CQ:
 198        case MLX4_CMD_HW2SW_SRQ:
 199        case MLX4_CMD_HW2SW_MPT:
 200        case MLX4_CMD_CLOSE_HCA:
 201        case MLX4_QP_FLOW_STEERING_DETACH:
 202        case MLX4_CMD_FREE_RES:
 203        case MLX4_CMD_CLOSE_PORT:
 204                return CMD_STAT_OK;
 205
 206        case MLX4_CMD_QP_ATTACH:
  207                /* In the detach case, return success */
 208                if (op_modifier == 0)
 209                        return CMD_STAT_OK;
 210                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 211
 212        default:
 213                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 214        }
 215}
 216
 217static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
 218{
 219        /* Any error during the closing commands below is considered fatal */
 220        if (op == MLX4_CMD_CLOSE_HCA ||
 221            op == MLX4_CMD_HW2SW_EQ ||
 222            op == MLX4_CMD_HW2SW_CQ ||
 223            op == MLX4_CMD_2RST_QP ||
 224            op == MLX4_CMD_HW2SW_SRQ ||
 225            op == MLX4_CMD_SYNC_TPT ||
 226            op == MLX4_CMD_UNMAP_ICM ||
 227            op == MLX4_CMD_UNMAP_ICM_AUX ||
 228            op == MLX4_CMD_UNMAP_FA)
 229                return 1;
  230        /* Error on MLX4_CMD_HW2SW_MPT is fatal except when the fw status
  231         * equals CMD_STAT_REG_BOUND.  This status indicates that the memory
  232         * region has memory windows bound to it, which may result from
  233         * invalid user-space usage and is not fatal.
  234         */
 235        if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
 236                return 1;
 237        return 0;
 238}
 239
 240static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
 241                               int err)
 242{
  243        /* Only if the reset flow is really active is the return code based
  244         * on the command; otherwise the current error code is returned.
  245         */
 246        if (mlx4_internal_err_reset) {
 247                mlx4_enter_error_state(dev->persist);
 248                err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 249        }
 250
 251        return err;
 252}
 253
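/* A comm-channel command is still pending while the toggle bit in the
 * slave_read register has not caught up with our comm_toggle.
 */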
 254static int comm_pending(struct mlx4_dev *dev)
 255{
 256        struct mlx4_priv *priv = mlx4_priv(dev);
 257        u32 status = readl(&priv->mfunc.comm->slave_read);
 258
 259        return (swab32(status) >> 31) != priv->cmd.comm_toggle;
 260}
 261
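/* Post a command on the slave->master communication channel by writing the
 * parameter, command and flipped toggle bit to the slave_write register.
 */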
 262static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
 263{
 264        struct mlx4_priv *priv = mlx4_priv(dev);
 265        u32 val;
 266
 267        /* To avoid writing to unknown addresses after the device state was
  268         * changed to internal error and the function was reset,
 269         * check the INTERNAL_ERROR flag which is updated under
 270         * device_state_mutex lock.
 271         */
 272        mutex_lock(&dev->persist->device_state_mutex);
 273
 274        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
 275                mutex_unlock(&dev->persist->device_state_mutex);
 276                return -EIO;
 277        }
 278
 279        priv->cmd.comm_toggle ^= 1;
 280        val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
 281        __raw_writel((__force u32) cpu_to_be32(val),
 282                     &priv->mfunc.comm->slave_write);
 283        mmiowb();
 284        mutex_unlock(&dev->persist->device_state_mutex);
 285        return 0;
 286}
 287
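/* Issue a comm-channel command in polling mode: post it and busy-wait (with
 * cond_resched) until the master acknowledges it via the slave_read toggle
 * or the timeout expires.
 */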
 288static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
 289                       unsigned long timeout)
 290{
 291        struct mlx4_priv *priv = mlx4_priv(dev);
 292        unsigned long end;
 293        int err = 0;
 294        int ret_from_pending = 0;
 295
 296        /* First, verify that the master reports correct status */
 297        if (comm_pending(dev)) {
 298                mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
 299                          priv->cmd.comm_toggle, cmd);
 300                return -EAGAIN;
 301        }
 302
 303        /* Write command */
 304        down(&priv->cmd.poll_sem);
 305        if (mlx4_comm_cmd_post(dev, cmd, param)) {
  306                /* mlx4_comm_cmd_post returns an error only when the
  307                 * device state is INTERNAL_ERROR.
  308                 */
 309                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 310                goto out;
 311        }
 312
 313        end = msecs_to_jiffies(timeout) + jiffies;
 314        while (comm_pending(dev) && time_before(jiffies, end))
 315                cond_resched();
 316        ret_from_pending = comm_pending(dev);
 317        if (ret_from_pending) {
  318                /* Check if the slave is trying to boot in the middle of the
  319                 * FLR process. The only non-zero result of the RESET
  320                 * command is MLX4_DELAY_RESET_SLAVE. */
  321                if (cmd == MLX4_COMM_CMD_RESET) {
 322                        err = MLX4_DELAY_RESET_SLAVE;
 323                        goto out;
 324                } else {
 325                        mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
 326                                  cmd);
 327                        err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 328                }
 329        }
 330
 331        if (err)
 332                mlx4_enter_error_state(dev->persist);
 333out:
 334        up(&priv->cmd.poll_sem);
 335        return err;
 336}
 337
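/* Issue a comm-channel command in event mode: take a free context/token,
 * post the command and sleep until its completion is signalled or the
 * timeout expires.
 */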
 338static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
 339                              u16 param, u16 op, unsigned long timeout)
 340{
 341        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 342        struct mlx4_cmd_context *context;
 343        unsigned long end;
 344        int err = 0;
 345
 346        down(&cmd->event_sem);
 347
 348        spin_lock(&cmd->context_lock);
 349        BUG_ON(cmd->free_head < 0);
 350        context = &cmd->context[cmd->free_head];
 351        context->token += cmd->token_mask + 1;
 352        cmd->free_head = context->next;
 353        spin_unlock(&cmd->context_lock);
 354
 355        reinit_completion(&context->done);
 356
 357        if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
  358                /* mlx4_comm_cmd_post returns an error only when the
  359                 * device state is INTERNAL_ERROR.
  360                 */
 361                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 362                goto out;
 363        }
 364
 365        if (!wait_for_completion_timeout(&context->done,
 366                                         msecs_to_jiffies(timeout))) {
 367                mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
 368                          vhcr_cmd, op);
 369                goto out_reset;
 370        }
 371
 372        err = context->result;
 373        if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
 374                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 375                         vhcr_cmd, context->fw_status);
 376                if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
 377                        goto out_reset;
 378        }
 379
  380        /* Wait for the comm channel to become ready; this is
  381         * necessary to prevent a race when switching from event to
  382         * polling mode.
  383         * Skip this section if the device is in FATAL_ERROR state:
  384         * in that state, no commands are sent via the comm channel
  385         * until the device has returned from reset.
  386         */
 387        if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
 388                end = msecs_to_jiffies(timeout) + jiffies;
 389                while (comm_pending(dev) && time_before(jiffies, end))
 390                        cond_resched();
 391        }
 392        goto out;
 393
 394out_reset:
 395        err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 396        mlx4_enter_error_state(dev->persist);
 397out:
 398        spin_lock(&cmd->context_lock);
 399        context->next = cmd->free_head;
 400        cmd->free_head = context - cmd->context;
 401        spin_unlock(&cmd->context_lock);
 402
 403        up(&cmd->event_sem);
 404        return err;
 405}
 406
 407int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
 408                  u16 op, unsigned long timeout)
 409{
 410        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 411                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
 412
 413        if (mlx4_priv(dev)->cmd.use_events)
 414                return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
 415        return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
 416}
 417
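/* The HCR is still busy while the 'go' bit is set or firmware has not yet
 * flipped the toggle bit for the previously posted command.
 */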
 418static int cmd_pending(struct mlx4_dev *dev)
 419{
 420        u32 status;
 421
 422        if (pci_channel_offline(dev->persist->pdev))
 423                return -EIO;
 424
 425        status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
 426
 427        return (status & swab32(1 << HCR_GO_BIT)) ||
 428                (mlx4_priv(dev)->cmd.toggle ==
 429                 !!(status & swab32(1 << HCR_T_BIT)));
 430}
 431
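/* Post a command directly to the HCR: wait for the 'go' bit to clear, write
 * the six parameter dwords, then write the 'go' bit together with the opcode,
 * opcode modifier, toggle and event bits.
 */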
 432static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
 433                         u32 in_modifier, u8 op_modifier, u16 op, u16 token,
 434                         int event)
 435{
 436        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 437        u32 __iomem *hcr = cmd->hcr;
 438        int ret = -EIO;
 439        unsigned long end;
 440
 441        mutex_lock(&dev->persist->device_state_mutex);
  442        /* To avoid writing to unknown addresses after the device state was
  443         * changed to internal error and the chip was reset,
  444         * check the INTERNAL_ERROR flag which is updated under
  445         * device_state_mutex lock.
  446         */
 447        if (pci_channel_offline(dev->persist->pdev) ||
 448            (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
 449                /*
 450                 * Device is going through error recovery
 451                 * and cannot accept commands.
 452                 */
 453                goto out;
 454        }
 455
 456        end = jiffies;
 457        if (event)
 458                end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
 459
 460        while (cmd_pending(dev)) {
 461                if (pci_channel_offline(dev->persist->pdev)) {
 462                        /*
 463                         * Device is going through error recovery
 464                         * and cannot accept commands.
 465                         */
 466                        goto out;
 467                }
 468
 469                if (time_after_eq(jiffies, end)) {
 470                        mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
 471                        goto out;
 472                }
 473                cond_resched();
 474        }
 475
 476        /*
 477         * We use writel (instead of something like memcpy_toio)
 478         * because writes of less than 32 bits to the HCR don't work
 479         * (and some architectures such as ia64 implement memcpy_toio
 480         * in terms of writeb).
 481         */
 482        __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
 483        __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
 484        __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
 485        __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
 486        __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
 487        __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);
 488
 489        /* __raw_writel may not order writes. */
 490        wmb();
 491
 492        __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
 493                                               (cmd->toggle << HCR_T_BIT)       |
 494                                               (event ? (1 << HCR_E_BIT) : 0)   |
 495                                               (op_modifier << HCR_OPMOD_SHIFT) |
 496                                               op), hcr + 6);
 497
 498        /*
 499         * Make sure that our HCR writes don't get mixed in with
 500         * writes from another CPU starting a FW command.
 501         */
 502        mmiowb();
 503
 504        cmd->toggle = cmd->toggle ^ 1;
 505
 506        ret = 0;
 507
 508out:
 509        if (ret)
 510                mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
 511                          op, ret, in_param, in_modifier, op_modifier);
 512        mutex_unlock(&dev->persist->device_state_mutex);
 513
 514        return ret;
 515}
 516
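/* Issue a command as a slave: fill in the VHCR mailbox and either process it
 * directly (when this function is the master) or signal the master over the
 * communication channel.
 */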
 517static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 518                          int out_is_imm, u32 in_modifier, u8 op_modifier,
 519                          u16 op, unsigned long timeout)
 520{
 521        struct mlx4_priv *priv = mlx4_priv(dev);
 522        struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
 523        int ret;
 524
 525        mutex_lock(&priv->cmd.slave_cmd_mutex);
 526
 527        vhcr->in_param = cpu_to_be64(in_param);
 528        vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
 529        vhcr->in_modifier = cpu_to_be32(in_modifier);
 530        vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
 531        vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
 532        vhcr->status = 0;
 533        vhcr->flags = !!(priv->cmd.use_events) << 6;
 534
 535        if (mlx4_is_master(dev)) {
 536                ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
 537                if (!ret) {
 538                        if (out_is_imm) {
 539                                if (out_param)
 540                                        *out_param =
 541                                                be64_to_cpu(vhcr->out_param);
 542                                else {
 543                                        mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 544                                                 op);
 545                                        vhcr->status = CMD_STAT_BAD_PARAM;
 546                                }
 547                        }
 548                        ret = mlx4_status_to_errno(vhcr->status);
 549                }
 550                if (ret &&
 551                    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 552                        ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
 553        } else {
 554                ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
 555                                    MLX4_COMM_TIME + timeout);
 556                if (!ret) {
 557                        if (out_is_imm) {
 558                                if (out_param)
 559                                        *out_param =
 560                                                be64_to_cpu(vhcr->out_param);
 561                                else {
 562                                        mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 563                                                 op);
 564                                        vhcr->status = CMD_STAT_BAD_PARAM;
 565                                }
 566                        }
 567                        ret = mlx4_status_to_errno(vhcr->status);
 568                } else {
 569                        if (dev->persist->state &
 570                            MLX4_DEVICE_STATE_INTERNAL_ERROR)
 571                                ret = mlx4_internal_err_ret_value(dev, op,
 572                                                                  op_modifier);
 573                        else
 574                                mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
 575                }
 576        }
 577
 578        mutex_unlock(&priv->cmd.slave_cmd_mutex);
 579        return ret;
 580}
 581
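/* Execute a command in polling mode: post it to the HCR and busy-wait (with
 * cond_resched) until the 'go' bit clears or the timeout expires.
 */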
 582static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 583                         int out_is_imm, u32 in_modifier, u8 op_modifier,
 584                         u16 op, unsigned long timeout)
 585{
 586        struct mlx4_priv *priv = mlx4_priv(dev);
 587        void __iomem *hcr = priv->cmd.hcr;
 588        int err = 0;
 589        unsigned long end;
 590        u32 stat;
 591
 592        down(&priv->cmd.poll_sem);
 593
 594        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
 595                /*
 596                 * Device is going through error recovery
 597                 * and cannot accept commands.
 598                 */
 599                err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 600                goto out;
 601        }
 602
 603        if (out_is_imm && !out_param) {
 604                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 605                         op);
 606                err = -EINVAL;
 607                goto out;
 608        }
 609
 610        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
 611                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
 612        if (err)
 613                goto out_reset;
 614
 615        end = msecs_to_jiffies(timeout) + jiffies;
 616        while (cmd_pending(dev) && time_before(jiffies, end)) {
 617                if (pci_channel_offline(dev->persist->pdev)) {
 618                        /*
 619                         * Device is going through error recovery
 620                         * and cannot accept commands.
 621                         */
 622                        err = -EIO;
 623                        goto out_reset;
 624                }
 625
 626                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
 627                        err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 628                        goto out;
 629                }
 630
 631                cond_resched();
 632        }
 633
 634        if (cmd_pending(dev)) {
 635                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
 636                          op);
 637                err = -EIO;
 638                goto out_reset;
 639        }
 640
 641        if (out_is_imm)
 642                *out_param =
 643                        (u64) be32_to_cpu((__force __be32)
 644                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
 645                        (u64) be32_to_cpu((__force __be32)
 646                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
 647        stat = be32_to_cpu((__force __be32)
 648                           __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
 649        err = mlx4_status_to_errno(stat);
 650        if (err) {
 651                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 652                         op, stat);
 653                if (mlx4_closing_cmd_fatal_error(op, stat))
 654                        goto out_reset;
 655                goto out;
 656        }
 657
 658out_reset:
 659        if (err)
 660                err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
 661out:
 662        up(&priv->cmd.poll_sem);
 663        return err;
 664}
 665
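/* Completion handler for event-mode commands: look up the waiting context by
 * token, record the firmware status and result, and wake up the issuer.
 */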
 666void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
 667{
 668        struct mlx4_priv *priv = mlx4_priv(dev);
 669        struct mlx4_cmd_context *context =
 670                &priv->cmd.context[token & priv->cmd.token_mask];
 671
 672        /* previously timed out command completing at long last */
 673        if (token != context->token)
 674                return;
 675
 676        context->fw_status = status;
 677        context->result    = mlx4_status_to_errno(status);
 678        context->out_param = out_param;
 679
 680        complete(&context->done);
 681}
 682
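/* Execute a command in event mode: take a free context/token, post the
 * command and sleep until the completion event arrives or the timeout
 * expires.
 */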
 683static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 684                         int out_is_imm, u32 in_modifier, u8 op_modifier,
 685                         u16 op, unsigned long timeout)
 686{
 687        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
 688        struct mlx4_cmd_context *context;
 689        long ret_wait;
 690        int err = 0;
 691
 692        down(&cmd->event_sem);
 693
 694        spin_lock(&cmd->context_lock);
 695        BUG_ON(cmd->free_head < 0);
 696        context = &cmd->context[cmd->free_head];
 697        context->token += cmd->token_mask + 1;
 698        cmd->free_head = context->next;
 699        spin_unlock(&cmd->context_lock);
 700
 701        if (out_is_imm && !out_param) {
 702                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
 703                         op);
 704                err = -EINVAL;
 705                goto out;
 706        }
 707
 708        reinit_completion(&context->done);
 709
 710        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
 711                            in_modifier, op_modifier, op, context->token, 1);
 712        if (err)
 713                goto out_reset;
 714
 715        if (op == MLX4_CMD_SENSE_PORT) {
 716                ret_wait =
 717                        wait_for_completion_interruptible_timeout(&context->done,
 718                                                                  msecs_to_jiffies(timeout));
 719                if (ret_wait < 0) {
 720                        context->fw_status = 0;
 721                        context->out_param = 0;
 722                        context->result = 0;
 723                }
 724        } else {
 725                ret_wait = (long)wait_for_completion_timeout(&context->done,
 726                                                             msecs_to_jiffies(timeout));
 727        }
 728        if (!ret_wait) {
 729                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
 730                          op);
 731                if (op == MLX4_CMD_NOP) {
 732                        err = -EBUSY;
 733                        goto out;
 734                } else {
 735                        err = -EIO;
 736                        goto out_reset;
 737                }
 738        }
 739
 740        err = context->result;
 741        if (err) {
  742                /* Since we do not want this error message to always be
  743                 * displayed at driver start when there are ConnectX2 HCAs
  744                 * on the host, we demote the message for this specific
  745                 * command/input_mod/opcode_mod/fw-status combination to debug.
  746                 */
 747                if (op == MLX4_CMD_SET_PORT &&
 748                    (in_modifier == 1 || in_modifier == 2) &&
 749                    op_modifier == MLX4_SET_PORT_IB_OPCODE &&
 750                    context->fw_status == CMD_STAT_BAD_SIZE)
 751                        mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
 752                                 op, context->fw_status);
 753                else
 754                        mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
 755                                 op, context->fw_status);
 756                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 757                        err = mlx4_internal_err_ret_value(dev, op, op_modifier);
 758                else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
 759                        goto out_reset;
 760
 761                goto out;
 762        }
 763
 764        if (out_is_imm)
 765                *out_param = context->out_param;
 766
 767out_reset:
 768        if (err)
 769                err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
 770out:
 771        spin_lock(&cmd->context_lock);
 772        context->next = cmd->free_head;
 773        cmd->free_head = context - cmd->context;
 774        spin_unlock(&cmd->context_lock);
 775
 776        up(&cmd->event_sem);
 777        return err;
 778}
 779
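/* Top-level command dispatcher: on a non-multifunction device, or for native
 * commands on the master, the command goes straight to the HCR in event or
 * polling mode; everything else is funneled through mlx4_slave_cmd().
 * Callers normally use the mlx4_cmd()/mlx4_cmd_box()/mlx4_cmd_imm() helpers
 * from <linux/mlx4/cmd.h>, which wrap this function.
 */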
 780int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 781               int out_is_imm, u32 in_modifier, u8 op_modifier,
 782               u16 op, unsigned long timeout, int native)
 783{
 784        if (pci_channel_offline(dev->persist->pdev))
 785                return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
 786
 787        if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
 788                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
 789                        return mlx4_internal_err_ret_value(dev, op,
 790                                                          op_modifier);
 791                if (mlx4_priv(dev)->cmd.use_events)
 792                        return mlx4_cmd_wait(dev, in_param, out_param,
 793                                             out_is_imm, in_modifier,
 794                                             op_modifier, op, timeout);
 795                else
 796                        return mlx4_cmd_poll(dev, in_param, out_param,
 797                                             out_is_imm, in_modifier,
 798                                             op_modifier, op, timeout);
 799        }
 800        return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
 801                              in_modifier, op_modifier, op, timeout);
 802}
 803EXPORT_SYMBOL_GPL(__mlx4_cmd);
 804
 805
 806int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
 807{
 808        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
 809                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 810}
 811
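/* DMA 'size' bytes between a buffer of this function and a buffer of the
 * given slave using the ACCESS_MEM firmware command; both addresses must be
 * 4KB-aligned and 'size' a multiple of 256 bytes.
 */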
 812static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
 813                           int slave, u64 slave_addr,
 814                           int size, int is_read)
 815{
 816        u64 in_param;
 817        u64 out_param;
 818
 819        if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
 820            (slave & ~0x7f) | (size & 0xff)) {
 821                mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
 822                         slave_addr, master_addr, slave, size);
 823                return -EINVAL;
 824        }
 825
 826        if (is_read) {
 827                in_param = (u64) slave | slave_addr;
 828                out_param = (u64) dev->caps.function | master_addr;
 829        } else {
 830                in_param = (u64) dev->caps.function | master_addr;
 831                out_param = (u64) slave | slave_addr;
 832        }
 833
 834        return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
 835                            MLX4_CMD_ACCESS_MEM,
 836                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 837}
 838
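/* Read one 32-entry block of the physical P_Key table through a MAD_IFC
 * command; 'index' must be a multiple of 32.
 */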
 839static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
 840                               struct mlx4_cmd_mailbox *inbox,
 841                               struct mlx4_cmd_mailbox *outbox)
 842{
 843        struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
 844        struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
 845        int err;
 846        int i;
 847
 848        if (index & 0x1f)
 849                return -EINVAL;
 850
 851        in_mad->attr_mod = cpu_to_be32(index / 32);
 852
 853        err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
 854                           MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
 855                           MLX4_CMD_NATIVE);
 856        if (err)
 857                return err;
 858
 859        for (i = 0; i < 32; ++i)
 860                pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
 861
 862        return err;
 863}
 864
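/* Read the entire physical P_Key table for a port, 32 entries at a time. */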
 865static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
 866                               struct mlx4_cmd_mailbox *inbox,
 867                               struct mlx4_cmd_mailbox *outbox)
 868{
 869        int i;
 870        int err;
 871
 872        for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
 873                err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
 874                if (err)
 875                        return err;
 876        }
 877
 878        return 0;
 879}
 880#define PORT_CAPABILITY_LOCATION_IN_SMP 20
 881#define PORT_STATE_OFFSET 32
 882
 883static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
 884{
 885        if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
 886                return IB_PORT_ACTIVE;
 887        else
 888                return IB_PORT_DOWN;
 889}
 890
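/* Paravirtualize MAD_IFC for slaves: P_Key table, PortInfo, GUIDInfo and
 * NodeInfo queries are rewritten so that each function sees only its own
 * view, and unprivileged VFs are restricted to host-view LID-routed Get MADs.
 */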
 891static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
 892                                struct mlx4_vhcr *vhcr,
 893                                struct mlx4_cmd_mailbox *inbox,
 894                                struct mlx4_cmd_mailbox *outbox,
 895                                struct mlx4_cmd_info *cmd)
 896{
 897        struct ib_smp *smp = inbox->buf;
 898        u32 index;
 899        u8 port, slave_port;
 900        u8 opcode_modifier;
 901        u16 *table;
 902        int err;
 903        int vidx, pidx;
 904        int network_view;
 905        struct mlx4_priv *priv = mlx4_priv(dev);
 906        struct ib_smp *outsmp = outbox->buf;
 907        __be16 *outtab = (__be16 *)(outsmp->data);
 908        __be32 slave_cap_mask;
 909        __be64 slave_node_guid;
 910
 911        slave_port = vhcr->in_modifier;
 912        port = mlx4_slave_convert_port(dev, slave, slave_port);
 913
 914        /* network-view bit is for driver use only, and should not be passed to FW */
 915        opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
 916        network_view = !!(vhcr->op_modifier & 0x8);
 917
 918        if (smp->base_version == 1 &&
 919            smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
 920            smp->class_version == 1) {
 921                /* host view is paravirtualized */
 922                if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
 923                        if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
 924                                index = be32_to_cpu(smp->attr_mod);
 925                                if (port < 1 || port > dev->caps.num_ports)
 926                                        return -EINVAL;
 927                                table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
 928                                                sizeof(*table) * 32, GFP_KERNEL);
 929
 930                                if (!table)
 931                                        return -ENOMEM;
 932                                /* need to get the full pkey table because the paravirtualized
 933                                 * pkeys may be scattered among several pkey blocks.
 934                                 */
 935                                err = get_full_pkey_table(dev, port, table, inbox, outbox);
 936                                if (!err) {
 937                                        for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
 938                                                pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
 939                                                outtab[vidx % 32] = cpu_to_be16(table[pidx]);
 940                                        }
 941                                }
 942                                kfree(table);
 943                                return err;
 944                        }
 945                        if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
  946                                /* get the slave specific caps: */
  947                                /* do the command */
 948                                smp->attr_mod = cpu_to_be32(port);
 949                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
 950                                            port, opcode_modifier,
 951                                            vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 952                                /* modify the response for slaves */
 953                                if (!err && slave != mlx4_master_func_num(dev)) {
 954                                        u8 *state = outsmp->data + PORT_STATE_OFFSET;
 955
 956                                        *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
 957                                        slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
 958                                        memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
 959                                }
 960                                return err;
 961                        }
 962                        if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
 963                                __be64 guid = mlx4_get_admin_guid(dev, slave,
 964                                                                  port);
 965
 966                                /* set the PF admin guid to the FW/HW burned
 967                                 * GUID, if it wasn't yet set
 968                                 */
 969                                if (slave == 0 && guid == 0) {
 970                                        smp->attr_mod = 0;
 971                                        err = mlx4_cmd_box(dev,
 972                                                           inbox->dma,
 973                                                           outbox->dma,
 974                                                           vhcr->in_modifier,
 975                                                           opcode_modifier,
 976                                                           vhcr->op,
 977                                                           MLX4_CMD_TIME_CLASS_C,
 978                                                           MLX4_CMD_NATIVE);
 979                                        if (err)
 980                                                return err;
 981                                        mlx4_set_admin_guid(dev,
 982                                                            *(__be64 *)outsmp->
 983                                                            data, slave, port);
 984                                } else {
 985                                        memcpy(outsmp->data, &guid, 8);
 986                                }
 987
 988                                /* clean all other gids */
 989                                memset(outsmp->data + 8, 0, 56);
 990                                return 0;
 991                        }
 992                        if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
 993                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
 994                                             port, opcode_modifier,
 995                                             vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
 996                                if (!err) {
 997                                        slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
 998                                        memcpy(outsmp->data + 12, &slave_node_guid, 8);
 999                                }
1000                                return err;
1001                        }
1002                }
1003        }
1004
1005        /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
1006         * These are the MADs used by ib verbs (such as ib_query_gids).
1007         */
1008        if (slave != mlx4_master_func_num(dev) &&
1009            !mlx4_vf_smi_enabled(dev, slave, port)) {
1010                if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
1011                      smp->method == IB_MGMT_METHOD_GET) || network_view) {
1012                        mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
 1013                                 slave, smp->mgmt_class, smp->method,
1014                                 network_view ? "Network" : "Host",
1015                                 be16_to_cpu(smp->attr_id));
1016                        return -EPERM;
1017                }
1018        }
1019
1020        return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
1021                                    vhcr->in_modifier, opcode_modifier,
1022                                    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1023}
1024
1025static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
1026                     struct mlx4_vhcr *vhcr,
1027                     struct mlx4_cmd_mailbox *inbox,
1028                     struct mlx4_cmd_mailbox *outbox,
1029                     struct mlx4_cmd_info *cmd)
1030{
1031        return -EPERM;
1032}
1033
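/* Generic passthrough wrapper: forward a slave's command to firmware,
 * substituting the master-side mailbox DMA addresses for the in/out
 * parameters and, if requested, encoding the slave id in the input parameter.
 */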
1034int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
1035                     struct mlx4_vhcr *vhcr,
1036                     struct mlx4_cmd_mailbox *inbox,
1037                     struct mlx4_cmd_mailbox *outbox,
1038                     struct mlx4_cmd_info *cmd)
1039{
1040        u64 in_param;
1041        u64 out_param;
1042        int err;
1043
1044        in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1045        out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
1046        if (cmd->encode_slave_id) {
1047                in_param &= 0xffffffffffffff00ll;
1048                in_param |= slave;
1049        }
1050
1051        err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1052                         vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1053                         MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1054
1055        if (cmd->out_is_imm)
1056                vhcr->out_param = out_param;
1057
1058        return err;
1059}
1060
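/* Dispatch table for commands received from slaves over the comm channel:
 * per-opcode mailbox requirements plus optional verify and wrapper callbacks.
 */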
1061static struct mlx4_cmd_info cmd_info[] = {
1062        {
1063                .opcode = MLX4_CMD_QUERY_FW,
1064                .has_inbox = false,
1065                .has_outbox = true,
1066                .out_is_imm = false,
1067                .encode_slave_id = false,
1068                .verify = NULL,
1069                .wrapper = mlx4_QUERY_FW_wrapper
1070        },
1071        {
1072                .opcode = MLX4_CMD_QUERY_HCA,
1073                .has_inbox = false,
1074                .has_outbox = true,
1075                .out_is_imm = false,
1076                .encode_slave_id = false,
1077                .verify = NULL,
1078                .wrapper = NULL
1079        },
1080        {
1081                .opcode = MLX4_CMD_QUERY_DEV_CAP,
1082                .has_inbox = false,
1083                .has_outbox = true,
1084                .out_is_imm = false,
1085                .encode_slave_id = false,
1086                .verify = NULL,
1087                .wrapper = mlx4_QUERY_DEV_CAP_wrapper
1088        },
1089        {
1090                .opcode = MLX4_CMD_QUERY_FUNC_CAP,
1091                .has_inbox = false,
1092                .has_outbox = true,
1093                .out_is_imm = false,
1094                .encode_slave_id = false,
1095                .verify = NULL,
1096                .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1097        },
1098        {
1099                .opcode = MLX4_CMD_QUERY_ADAPTER,
1100                .has_inbox = false,
1101                .has_outbox = true,
1102                .out_is_imm = false,
1103                .encode_slave_id = false,
1104                .verify = NULL,
1105                .wrapper = NULL
1106        },
1107        {
1108                .opcode = MLX4_CMD_INIT_PORT,
1109                .has_inbox = false,
1110                .has_outbox = false,
1111                .out_is_imm = false,
1112                .encode_slave_id = false,
1113                .verify = NULL,
1114                .wrapper = mlx4_INIT_PORT_wrapper
1115        },
1116        {
1117                .opcode = MLX4_CMD_CLOSE_PORT,
1118                .has_inbox = false,
1119                .has_outbox = false,
1120                .out_is_imm  = false,
1121                .encode_slave_id = false,
1122                .verify = NULL,
1123                .wrapper = mlx4_CLOSE_PORT_wrapper
1124        },
1125        {
1126                .opcode = MLX4_CMD_QUERY_PORT,
1127                .has_inbox = false,
1128                .has_outbox = true,
1129                .out_is_imm = false,
1130                .encode_slave_id = false,
1131                .verify = NULL,
1132                .wrapper = mlx4_QUERY_PORT_wrapper
1133        },
1134        {
1135                .opcode = MLX4_CMD_SET_PORT,
1136                .has_inbox = true,
1137                .has_outbox = false,
1138                .out_is_imm = false,
1139                .encode_slave_id = false,
1140                .verify = NULL,
1141                .wrapper = mlx4_SET_PORT_wrapper
1142        },
1143        {
1144                .opcode = MLX4_CMD_MAP_EQ,
1145                .has_inbox = false,
1146                .has_outbox = false,
1147                .out_is_imm = false,
1148                .encode_slave_id = false,
1149                .verify = NULL,
1150                .wrapper = mlx4_MAP_EQ_wrapper
1151        },
1152        {
1153                .opcode = MLX4_CMD_SW2HW_EQ,
1154                .has_inbox = true,
1155                .has_outbox = false,
1156                .out_is_imm = false,
1157                .encode_slave_id = true,
1158                .verify = NULL,
1159                .wrapper = mlx4_SW2HW_EQ_wrapper
1160        },
1161        {
1162                .opcode = MLX4_CMD_HW_HEALTH_CHECK,
1163                .has_inbox = false,
1164                .has_outbox = false,
1165                .out_is_imm = false,
1166                .encode_slave_id = false,
1167                .verify = NULL,
1168                .wrapper = NULL
1169        },
1170        {
1171                .opcode = MLX4_CMD_NOP,
1172                .has_inbox = false,
1173                .has_outbox = false,
1174                .out_is_imm = false,
1175                .encode_slave_id = false,
1176                .verify = NULL,
1177                .wrapper = NULL
1178        },
1179        {
1180                .opcode = MLX4_CMD_CONFIG_DEV,
1181                .has_inbox = false,
1182                .has_outbox = true,
1183                .out_is_imm = false,
1184                .encode_slave_id = false,
1185                .verify = NULL,
1186                .wrapper = mlx4_CONFIG_DEV_wrapper
1187        },
1188        {
1189                .opcode = MLX4_CMD_ALLOC_RES,
1190                .has_inbox = false,
1191                .has_outbox = false,
1192                .out_is_imm = true,
1193                .encode_slave_id = false,
1194                .verify = NULL,
1195                .wrapper = mlx4_ALLOC_RES_wrapper
1196        },
1197        {
1198                .opcode = MLX4_CMD_FREE_RES,
1199                .has_inbox = false,
1200                .has_outbox = false,
1201                .out_is_imm = false,
1202                .encode_slave_id = false,
1203                .verify = NULL,
1204                .wrapper = mlx4_FREE_RES_wrapper
1205        },
1206        {
1207                .opcode = MLX4_CMD_SW2HW_MPT,
1208                .has_inbox = true,
1209                .has_outbox = false,
1210                .out_is_imm = false,
1211                .encode_slave_id = true,
1212                .verify = NULL,
1213                .wrapper = mlx4_SW2HW_MPT_wrapper
1214        },
1215        {
1216                .opcode = MLX4_CMD_QUERY_MPT,
1217                .has_inbox = false,
1218                .has_outbox = true,
1219                .out_is_imm = false,
1220                .encode_slave_id = false,
1221                .verify = NULL,
1222                .wrapper = mlx4_QUERY_MPT_wrapper
1223        },
1224        {
1225                .opcode = MLX4_CMD_HW2SW_MPT,
1226                .has_inbox = false,
1227                .has_outbox = false,
1228                .out_is_imm = false,
1229                .encode_slave_id = false,
1230                .verify = NULL,
1231                .wrapper = mlx4_HW2SW_MPT_wrapper
1232        },
1233        {
1234                .opcode = MLX4_CMD_READ_MTT,
1235                .has_inbox = false,
1236                .has_outbox = true,
1237                .out_is_imm = false,
1238                .encode_slave_id = false,
1239                .verify = NULL,
1240                .wrapper = NULL
1241        },
1242        {
1243                .opcode = MLX4_CMD_WRITE_MTT,
1244                .has_inbox = true,
1245                .has_outbox = false,
1246                .out_is_imm = false,
1247                .encode_slave_id = false,
1248                .verify = NULL,
1249                .wrapper = mlx4_WRITE_MTT_wrapper
1250        },
1251        {
1252                .opcode = MLX4_CMD_SYNC_TPT,
1253                .has_inbox = true,
1254                .has_outbox = false,
1255                .out_is_imm = false,
1256                .encode_slave_id = false,
1257                .verify = NULL,
1258                .wrapper = NULL
1259        },
1260        {
1261                .opcode = MLX4_CMD_HW2SW_EQ,
1262                .has_inbox = false,
1263                .has_outbox = false,
1264                .out_is_imm = false,
1265                .encode_slave_id = true,
1266                .verify = NULL,
1267                .wrapper = mlx4_HW2SW_EQ_wrapper
1268        },
1269        {
1270                .opcode = MLX4_CMD_QUERY_EQ,
1271                .has_inbox = false,
1272                .has_outbox = true,
1273                .out_is_imm = false,
1274                .encode_slave_id = true,
1275                .verify = NULL,
1276                .wrapper = mlx4_QUERY_EQ_wrapper
1277        },
1278        {
1279                .opcode = MLX4_CMD_SW2HW_CQ,
1280                .has_inbox = true,
1281                .has_outbox = false,
1282                .out_is_imm = false,
1283                .encode_slave_id = true,
1284                .verify = NULL,
1285                .wrapper = mlx4_SW2HW_CQ_wrapper
1286        },
1287        {
1288                .opcode = MLX4_CMD_HW2SW_CQ,
1289                .has_inbox = false,
1290                .has_outbox = false,
1291                .out_is_imm = false,
1292                .encode_slave_id = false,
1293                .verify = NULL,
1294                .wrapper = mlx4_HW2SW_CQ_wrapper
1295        },
1296        {
1297                .opcode = MLX4_CMD_QUERY_CQ,
1298                .has_inbox = false,
1299                .has_outbox = true,
1300                .out_is_imm = false,
1301                .encode_slave_id = false,
1302                .verify = NULL,
1303                .wrapper = mlx4_QUERY_CQ_wrapper
1304        },
1305        {
1306                .opcode = MLX4_CMD_MODIFY_CQ,
1307                .has_inbox = true,
1308                .has_outbox = false,
1309                .out_is_imm = true,
1310                .encode_slave_id = false,
1311                .verify = NULL,
1312                .wrapper = mlx4_MODIFY_CQ_wrapper
1313        },
1314        {
1315                .opcode = MLX4_CMD_SW2HW_SRQ,
1316                .has_inbox = true,
1317                .has_outbox = false,
1318                .out_is_imm = false,
1319                .encode_slave_id = true,
1320                .verify = NULL,
1321                .wrapper = mlx4_SW2HW_SRQ_wrapper
1322        },
1323        {
1324                .opcode = MLX4_CMD_HW2SW_SRQ,
1325                .has_inbox = false,
1326                .has_outbox = false,
1327                .out_is_imm = false,
1328                .encode_slave_id = false,
1329                .verify = NULL,
1330                .wrapper = mlx4_HW2SW_SRQ_wrapper
1331        },
1332        {
1333                .opcode = MLX4_CMD_QUERY_SRQ,
1334                .has_inbox = false,
1335                .has_outbox = true,
1336                .out_is_imm = false,
1337                .encode_slave_id = false,
1338                .verify = NULL,
1339                .wrapper = mlx4_QUERY_SRQ_wrapper
1340        },
1341        {
1342                .opcode = MLX4_CMD_ARM_SRQ,
1343                .has_inbox = false,
1344                .has_outbox = false,
1345                .out_is_imm = false,
1346                .encode_slave_id = false,
1347                .verify = NULL,
1348                .wrapper = mlx4_ARM_SRQ_wrapper
1349        },
1350        {
1351                .opcode = MLX4_CMD_RST2INIT_QP,
1352                .has_inbox = true,
1353                .has_outbox = false,
1354                .out_is_imm = false,
1355                .encode_slave_id = true,
1356                .verify = NULL,
1357                .wrapper = mlx4_RST2INIT_QP_wrapper
1358        },
1359        {
1360                .opcode = MLX4_CMD_INIT2INIT_QP,
1361                .has_inbox = true,
1362                .has_outbox = false,
1363                .out_is_imm = false,
1364                .encode_slave_id = false,
1365                .verify = NULL,
1366                .wrapper = mlx4_INIT2INIT_QP_wrapper
1367        },
1368        {
1369                .opcode = MLX4_CMD_INIT2RTR_QP,
1370                .has_inbox = true,
1371                .has_outbox = false,
1372                .out_is_imm = false,
1373                .encode_slave_id = false,
1374                .verify = NULL,
1375                .wrapper = mlx4_INIT2RTR_QP_wrapper
1376        },
1377        {
1378                .opcode = MLX4_CMD_RTR2RTS_QP,
1379                .has_inbox = true,
1380                .has_outbox = false,
1381                .out_is_imm = false,
1382                .encode_slave_id = false,
1383                .verify = NULL,
1384                .wrapper = mlx4_RTR2RTS_QP_wrapper
1385        },
1386        {
1387                .opcode = MLX4_CMD_RTS2RTS_QP,
1388                .has_inbox = true,
1389                .has_outbox = false,
1390                .out_is_imm = false,
1391                .encode_slave_id = false,
1392                .verify = NULL,
1393                .wrapper = mlx4_RTS2RTS_QP_wrapper
1394        },
1395        {
1396                .opcode = MLX4_CMD_SQERR2RTS_QP,
1397                .has_inbox = true,
1398                .has_outbox = false,
1399                .out_is_imm = false,
1400                .encode_slave_id = false,
1401                .verify = NULL,
1402                .wrapper = mlx4_SQERR2RTS_QP_wrapper
1403        },
1404        {
1405                .opcode = MLX4_CMD_2ERR_QP,
1406                .has_inbox = false,
1407                .has_outbox = false,
1408                .out_is_imm = false,
1409                .encode_slave_id = false,
1410                .verify = NULL,
1411                .wrapper = mlx4_GEN_QP_wrapper
1412        },
1413        {
1414                .opcode = MLX4_CMD_RTS2SQD_QP,
1415                .has_inbox = false,
1416                .has_outbox = false,
1417                .out_is_imm = false,
1418                .encode_slave_id = false,
1419                .verify = NULL,
1420                .wrapper = mlx4_GEN_QP_wrapper
1421        },
1422        {
1423                .opcode = MLX4_CMD_SQD2SQD_QP,
1424                .has_inbox = true,
1425                .has_outbox = false,
1426                .out_is_imm = false,
1427                .encode_slave_id = false,
1428                .verify = NULL,
1429                .wrapper = mlx4_SQD2SQD_QP_wrapper
1430        },
1431        {
1432                .opcode = MLX4_CMD_SQD2RTS_QP,
1433                .has_inbox = true,
1434                .has_outbox = false,
1435                .out_is_imm = false,
1436                .encode_slave_id = false,
1437                .verify = NULL,
1438                .wrapper = mlx4_SQD2RTS_QP_wrapper
1439        },
1440        {
1441                .opcode = MLX4_CMD_2RST_QP,
1442                .has_inbox = false,
1443                .has_outbox = false,
1444                .out_is_imm = false,
1445                .encode_slave_id = false,
1446                .verify = NULL,
1447                .wrapper = mlx4_2RST_QP_wrapper
1448        },
1449        {
1450                .opcode = MLX4_CMD_QUERY_QP,
1451                .has_inbox = false,
1452                .has_outbox = true,
1453                .out_is_imm = false,
1454                .encode_slave_id = false,
1455                .verify = NULL,
1456                .wrapper = mlx4_GEN_QP_wrapper
1457        },
1458        {
1459                .opcode = MLX4_CMD_SUSPEND_QP,
1460                .has_inbox = false,
1461                .has_outbox = false,
1462                .out_is_imm = false,
1463                .encode_slave_id = false,
1464                .verify = NULL,
1465                .wrapper = mlx4_GEN_QP_wrapper
1466        },
1467        {
1468                .opcode = MLX4_CMD_UNSUSPEND_QP,
1469                .has_inbox = false,
1470                .has_outbox = false,
1471                .out_is_imm = false,
1472                .encode_slave_id = false,
1473                .verify = NULL,
1474                .wrapper = mlx4_GEN_QP_wrapper
1475        },
1476        {
1477                .opcode = MLX4_CMD_UPDATE_QP,
1478                .has_inbox = true,
1479                .has_outbox = false,
1480                .out_is_imm = false,
1481                .encode_slave_id = false,
1482                .verify = NULL,
1483                .wrapper = mlx4_UPDATE_QP_wrapper
1484        },
1485        {
1486                .opcode = MLX4_CMD_GET_OP_REQ,
1487                .has_inbox = false,
1488                .has_outbox = false,
1489                .out_is_imm = false,
1490                .encode_slave_id = false,
1491                .verify = NULL,
1492                .wrapper = mlx4_CMD_EPERM_wrapper,
1493        },
1494        {
1495                .opcode = MLX4_CMD_ALLOCATE_VPP,
1496                .has_inbox = false,
1497                .has_outbox = true,
1498                .out_is_imm = false,
1499                .encode_slave_id = false,
1500                .verify = NULL,
1501                .wrapper = mlx4_CMD_EPERM_wrapper,
1502        },
1503        {
1504                .opcode = MLX4_CMD_SET_VPORT_QOS,
1505                .has_inbox = false,
1506                .has_outbox = true,
1507                .out_is_imm = false,
1508                .encode_slave_id = false,
1509                .verify = NULL,
1510                .wrapper = mlx4_CMD_EPERM_wrapper,
1511        },
1512        {
1513                .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1514                .has_inbox = false,
1515                .has_outbox = false,
1516                .out_is_imm = false,
1517                .encode_slave_id = false,
1518                .verify = NULL, /* XXX verify: only demux can do this */
1519                .wrapper = NULL
1520        },
1521        {
1522                .opcode = MLX4_CMD_MAD_IFC,
1523                .has_inbox = true,
1524                .has_outbox = true,
1525                .out_is_imm = false,
1526                .encode_slave_id = false,
1527                .verify = NULL,
1528                .wrapper = mlx4_MAD_IFC_wrapper
1529        },
1530        {
1531                .opcode = MLX4_CMD_MAD_DEMUX,
1532                .has_inbox = false,
1533                .has_outbox = false,
1534                .out_is_imm = false,
1535                .encode_slave_id = false,
1536                .verify = NULL,
1537                .wrapper = mlx4_CMD_EPERM_wrapper
1538        },
1539        {
1540                .opcode = MLX4_CMD_QUERY_IF_STAT,
1541                .has_inbox = false,
1542                .has_outbox = true,
1543                .out_is_imm = false,
1544                .encode_slave_id = false,
1545                .verify = NULL,
1546                .wrapper = mlx4_QUERY_IF_STAT_wrapper
1547        },
1548        {
1549                .opcode = MLX4_CMD_ACCESS_REG,
1550                .has_inbox = true,
1551                .has_outbox = true,
1552                .out_is_imm = false,
1553                .encode_slave_id = false,
1554                .verify = NULL,
1555                .wrapper = mlx4_ACCESS_REG_wrapper,
1556        },
1557        {
1558                .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
1559                .has_inbox = false,
1560                .has_outbox = false,
1561                .out_is_imm = false,
1562                .encode_slave_id = false,
1563                .verify = NULL,
1564                .wrapper = mlx4_CMD_EPERM_wrapper,
1565        },
1566        /* Native multicast commands are not available for guests */
1567        {
1568                .opcode = MLX4_CMD_QP_ATTACH,
1569                .has_inbox = true,
1570                .has_outbox = false,
1571                .out_is_imm = false,
1572                .encode_slave_id = false,
1573                .verify = NULL,
1574                .wrapper = mlx4_QP_ATTACH_wrapper
1575        },
1576        {
1577                .opcode = MLX4_CMD_PROMISC,
1578                .has_inbox = false,
1579                .has_outbox = false,
1580                .out_is_imm = false,
1581                .encode_slave_id = false,
1582                .verify = NULL,
1583                .wrapper = mlx4_PROMISC_wrapper
1584        },
1585        /* Ethernet specific commands */
1586        {
1587                .opcode = MLX4_CMD_SET_VLAN_FLTR,
1588                .has_inbox = true,
1589                .has_outbox = false,
1590                .out_is_imm = false,
1591                .encode_slave_id = false,
1592                .verify = NULL,
1593                .wrapper = mlx4_SET_VLAN_FLTR_wrapper
1594        },
1595        {
1596                .opcode = MLX4_CMD_SET_MCAST_FLTR,
1597                .has_inbox = false,
1598                .has_outbox = false,
1599                .out_is_imm = false,
1600                .encode_slave_id = false,
1601                .verify = NULL,
1602                .wrapper = mlx4_SET_MCAST_FLTR_wrapper
1603        },
1604        {
1605                .opcode = MLX4_CMD_DUMP_ETH_STATS,
1606                .has_inbox = false,
1607                .has_outbox = true,
1608                .out_is_imm = false,
1609                .encode_slave_id = false,
1610                .verify = NULL,
1611                .wrapper = mlx4_DUMP_ETH_STATS_wrapper
1612        },
1613        {
1614                .opcode = MLX4_CMD_INFORM_FLR_DONE,
1615                .has_inbox = false,
1616                .has_outbox = false,
1617                .out_is_imm = false,
1618                .encode_slave_id = false,
1619                .verify = NULL,
1620                .wrapper = NULL
1621        },
1622        /* flow steering commands */
1623        {
1624                .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1625                .has_inbox = true,
1626                .has_outbox = false,
1627                .out_is_imm = true,
1628                .encode_slave_id = false,
1629                .verify = NULL,
1630                .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1631        },
1632        {
1633                .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1634                .has_inbox = false,
1635                .has_outbox = false,
1636                .out_is_imm = false,
1637                .encode_slave_id = false,
1638                .verify = NULL,
1639                .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1640        },
1641        {
1642                .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1643                .has_inbox = false,
1644                .has_outbox = false,
1645                .out_is_imm = false,
1646                .encode_slave_id = false,
1647                .verify = NULL,
1648                .wrapper = mlx4_CMD_EPERM_wrapper
1649        },
1650        {
1651                .opcode = MLX4_CMD_VIRT_PORT_MAP,
1652                .has_inbox = false,
1653                .has_outbox = false,
1654                .out_is_imm = false,
1655                .encode_slave_id = false,
1656                .verify = NULL,
1657                .wrapper = mlx4_CMD_EPERM_wrapper
1658        },
1659};
1660
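/* Execute a single virtual HCR command on behalf of a slave: DMA in the
 * slave's vHCR (unless an in-kernel copy was supplied), decode it, look the
 * opcode up in cmd_info[], DMA in the inbox if needed, then either run the
 * command's wrapper or forward it to FW with __mlx4_cmd().  On completion the
 * outbox and the vHCR status are DMA'd back to the slave, and a command
 * completion EQE is generated if the slave requested one (e_bit).
 */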
1661static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1662                                    struct mlx4_vhcr_cmd *in_vhcr)
1663{
1664        struct mlx4_priv *priv = mlx4_priv(dev);
1665        struct mlx4_cmd_info *cmd = NULL;
1666        struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1667        struct mlx4_vhcr *vhcr;
1668        struct mlx4_cmd_mailbox *inbox = NULL;
1669        struct mlx4_cmd_mailbox *outbox = NULL;
1670        u64 in_param;
1671        u64 out_param;
1672        int ret = 0;
1673        int i;
1674        int err = 0;
1675
1676        /* Create sw representation of Virtual HCR */
1677        vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1678        if (!vhcr)
1679                return -ENOMEM;
1680
1681        /* DMA in the vHCR */
1682        if (!in_vhcr) {
1683                ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1684                                      priv->mfunc.master.slave_state[slave].vhcr_dma,
1685                                      ALIGN(sizeof(struct mlx4_vhcr_cmd),
1686                                            MLX4_ACCESS_MEM_ALIGN), 1);
1687                if (ret) {
1688                        if (!(dev->persist->state &
1689                            MLX4_DEVICE_STATE_INTERNAL_ERROR))
1690                                mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1691                                         __func__, ret);
1692                        kfree(vhcr);
1693                        return ret;
1694                }
1695        }
1696
1697        /* Fill SW VHCR fields */
1698        vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1699        vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1700        vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1701        vhcr->token = be16_to_cpu(vhcr_cmd->token);
1702        vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1703        vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1704        vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1705
1706        /* Lookup command */
1707        for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1708                if (vhcr->op == cmd_info[i].opcode) {
1709                        cmd = &cmd_info[i];
1710                        break;
1711                }
1712        }
1713        if (!cmd) {
1714                mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1715                         vhcr->op, slave);
1716                vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1717                goto out_status;
1718        }
1719
1720        /* Read inbox */
1721        if (cmd->has_inbox) {
1722                vhcr->in_param &= INBOX_MASK;
1723                inbox = mlx4_alloc_cmd_mailbox(dev);
1724                if (IS_ERR(inbox)) {
1725                        vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1726                        inbox = NULL;
1727                        goto out_status;
1728                }
1729
1730                ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1731                                      vhcr->in_param,
1732                                      MLX4_MAILBOX_SIZE, 1);
1733                if (ret) {
1734                        if (!(dev->persist->state &
1735                            MLX4_DEVICE_STATE_INTERNAL_ERROR))
1736                                mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1737                                         __func__, cmd->opcode);
1738                        vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1739                        goto out_status;
1740                }
1741        }
1742
1743        /* Apply permission and bound checks if applicable */
1744        if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1745                mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1746                          vhcr->op, slave, vhcr->in_modifier);
1747                vhcr_cmd->status = CMD_STAT_BAD_OP;
1748                goto out_status;
1749        }
1750
1751        /* Allocate outbox */
1752        if (cmd->has_outbox) {
1753                outbox = mlx4_alloc_cmd_mailbox(dev);
1754                if (IS_ERR(outbox)) {
1755                        vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1756                        outbox = NULL;
1757                        goto out_status;
1758                }
1759        }
1760
1761        /* Execute the command! */
1762        if (cmd->wrapper) {
1763                err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1764                                   cmd);
1765                if (cmd->out_is_imm)
1766                        vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1767        } else {
1768                in_param = cmd->has_inbox ? (u64) inbox->dma :
1769                        vhcr->in_param;
1770                out_param = cmd->has_outbox ? (u64) outbox->dma :
1771                        vhcr->out_param;
1772                err = __mlx4_cmd(dev, in_param, &out_param,
1773                                 cmd->out_is_imm, vhcr->in_modifier,
1774                                 vhcr->op_modifier, vhcr->op,
1775                                 MLX4_CMD_TIME_CLASS_A,
1776                                 MLX4_CMD_NATIVE);
1777
1778                if (cmd->out_is_imm) {
1779                        vhcr->out_param = out_param;
1780                        vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1781                }
1782        }
1783
1784        if (err) {
1785                if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
1786                        mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1787                                  vhcr->op, slave, vhcr->errno, err);
1788                vhcr_cmd->status = mlx4_errno_to_status(err);
1789                goto out_status;
1790        }
1791
1792
1793        /* Write outbox if command completed successfully */
1794        if (cmd->has_outbox && !vhcr_cmd->status) {
1795                ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1796                                      vhcr->out_param,
1797                                      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1798                if (ret) {
1799                        /* If we failed to write back the outbox after the
1800                         * command was successfully executed, we must fail this
1801                         * slave, as it is now in an undefined state */
1802                        if (!(dev->persist->state &
1803                            MLX4_DEVICE_STATE_INTERNAL_ERROR))
1804                                mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1805                        goto out;
1806                }
1807        }
1808
1809out_status:
1810        /* DMA back vhcr result */
1811        if (!in_vhcr) {
1812                ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1813                                      priv->mfunc.master.slave_state[slave].vhcr_dma,
1814                                      ALIGN(sizeof(struct mlx4_vhcr),
1815                                            MLX4_ACCESS_MEM_ALIGN),
1816                                      MLX4_CMD_WRAPPED);
1817                if (ret)
1818                        mlx4_err(dev, "%s:Failed writing vhcr result\n",
1819                                 __func__);
1820                else if (vhcr->e_bit &&
1821                         mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1822                                mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1823                                          slave);
1824        }
1825
1826out:
1827        kfree(vhcr);
1828        mlx4_free_cmd_mailbox(dev, inbox);
1829        mlx4_free_cmd_mailbox(dev, outbox);
1830        return ret;
1831}
1832
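/* Apply a changed VF vlan/QoS/link-state admin directive immediately: if the
 * operational state already matches there is nothing to do; if UPDATE_QP is
 * not supported only the link state is applied; otherwise the new VLAN is
 * registered (unless VGT) and a work item is queued that walks the slave's
 * QPs with UPDATE_QP (see mlx4_vf_immed_vlan_work_handler()).
 */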
1833static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1834                                            int slave, int port)
1835{
1836        struct mlx4_vport_oper_state *vp_oper;
1837        struct mlx4_vport_state *vp_admin;
1838        struct mlx4_vf_immed_vlan_work *work;
1839        struct mlx4_dev *dev = &(priv->dev);
1840        int err;
1841        int admin_vlan_ix = NO_INDX;
1842
1843        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1844        vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1845
1846        if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1847            vp_oper->state.default_qos == vp_admin->default_qos &&
1848            vp_oper->state.link_state == vp_admin->link_state &&
1849            vp_oper->state.qos_vport == vp_admin->qos_vport)
1850                return 0;
1851
1852        if (!(priv->mfunc.master.slave_state[slave].active &&
1853              dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
1854                /* even if the UPDATE_QP command isn't supported, we still want
1855                 * to set this VF link according to the admin directive
1856                 */
1857                vp_oper->state.link_state = vp_admin->link_state;
1858                return -1;
1859        }
1860
1861        mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1862                 slave, port);
1863        mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1864                 vp_admin->default_vlan, vp_admin->default_qos,
1865                 vp_admin->link_state);
1866
1867        work = kzalloc(sizeof(*work), GFP_KERNEL);
1868        if (!work)
1869                return -ENOMEM;
1870
1871        if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1872                if (MLX4_VGT != vp_admin->default_vlan) {
1873                        err = __mlx4_register_vlan(&priv->dev, port,
1874                                                   vp_admin->default_vlan,
1875                                                   &admin_vlan_ix);
1876                        if (err) {
1877                                kfree(work);
1878                                mlx4_warn(&priv->dev,
1879                                          "No vlan resources slave %d, port %d\n",
1880                                          slave, port);
1881                                return err;
1882                        }
1883                } else {
1884                        admin_vlan_ix = NO_INDX;
1885                }
1886                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1887                mlx4_dbg(&priv->dev,
1888                         "alloc vlan %d idx  %d slave %d port %d\n",
1889                         (int)(vp_admin->default_vlan),
1890                         admin_vlan_ix, slave, port);
1891        }
1892
1893        /* save original vlan ix and vlan id */
1894        work->orig_vlan_id = vp_oper->state.default_vlan;
1895        work->orig_vlan_ix = vp_oper->vlan_idx;
1896
1897        /* handle new qos */
1898        if (vp_oper->state.default_qos != vp_admin->default_qos)
1899                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1900
1901        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1902                vp_oper->vlan_idx = admin_vlan_ix;
1903
1904        vp_oper->state.default_vlan = vp_admin->default_vlan;
1905        vp_oper->state.default_qos = vp_admin->default_qos;
1906        vp_oper->state.link_state = vp_admin->link_state;
1907        vp_oper->state.qos_vport = vp_admin->qos_vport;
1908
1909        if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1910                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
1911
1912        /* iterate over QPs owned by this slave, using UPDATE_QP */
1913        work->port = port;
1914        work->slave = slave;
1915        work->qos = vp_oper->state.default_qos;
1916        work->qos_vport = vp_oper->state.qos_vport;
1917        work->vlan_id = vp_oper->state.default_vlan;
1918        work->vlan_ix = vp_oper->vlan_idx;
1919        work->priv = priv;
1920        INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1921        queue_work(priv->mfunc.master.comm_wq, &work->work);
1922
1923        return 0;
1924}
1925
1926static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
1927{
1928        struct mlx4_qos_manager *port_qos_ctl;
1929        struct mlx4_priv *priv = mlx4_priv(dev);
1930
1931        port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
1932        bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
1933
1934        /* Enable only default prio at PF init routine */
1935        set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
1936}
1937
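/* Split the port's available VPPs evenly between the priorities enabled in
 * the port QoS bitmap and program the split with ALLOCATE_VPP.  For example
 * (illustrative numbers only), 64 available VPPs with a single enabled
 * priority would allow 64 QoS-capable VFs on the port.
 */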
1938static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
1939{
1940        int i;
1941        int err;
1942        int num_vfs;
1943        u16 available_vpp;
1944        u8 vpp_param[MLX4_NUM_UP];
1945        struct mlx4_qos_manager *port_qos;
1946        struct mlx4_priv *priv = mlx4_priv(dev);
1947
1948        err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1949        if (err) {
1950                mlx4_info(dev, "Failed to query available VPPs\n");
1951                return;
1952        }
1953
1954        port_qos = &priv->mfunc.master.qos_ctl[port];
1955        num_vfs = (available_vpp /
1956                   bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
1957
1958        for (i = 0; i < MLX4_NUM_UP; i++) {
1959                if (test_bit(i, port_qos->priority_bm))
1960                        vpp_param[i] = num_vfs;
1961        }
1962
1963        err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
1964        if (err) {
1965                mlx4_info(dev, "Failed to allocate VPPs\n");
1966                return;
1967        }
1968
1969        /* Query the actually allocated VPPs, just to make sure */
1970        err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1971        if (err) {
1972                mlx4_info(dev, "Failed to query available VPPs\n");
1973                return;
1974        }
1975
1976        port_qos->num_of_qos_vfs = num_vfs;
1977        mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp);
1978
1979        for (i = 0; i < MLX4_NUM_UP; i++)
1980                mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
1981                         vpp_param[i]);
1982}
1983
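/* Copy each active port's admin vport state into the operational state when
 * a slave comes up, registering the admin default VLAN (unless VGT) and, if
 * spoof-checking is enabled, the admin MAC.
 */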
1984static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1985{
1986        int port, err;
1987        struct mlx4_vport_state *vp_admin;
1988        struct mlx4_vport_oper_state *vp_oper;
1989        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1990                        &priv->dev, slave);
1991        int min_port = find_first_bit(actv_ports.ports,
1992                                      priv->dev.caps.num_ports) + 1;
1993        int max_port = min_port - 1 +
1994                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1995
1996        for (port = min_port; port <= max_port; port++) {
1997                if (!test_bit(port - 1, actv_ports.ports))
1998                        continue;
1999                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2000                        priv->mfunc.master.vf_admin[slave].enable_smi[port];
2001                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2002                vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2003                vp_oper->state = *vp_admin;
2004                if (MLX4_VGT != vp_admin->default_vlan) {
2005                        err = __mlx4_register_vlan(&priv->dev, port,
2006                                                   vp_admin->default_vlan, &(vp_oper->vlan_idx));
2007                        if (err) {
2008                                vp_oper->vlan_idx = NO_INDX;
2009                                mlx4_warn(&priv->dev,
2010                                          "No vlan resources slave %d, port %d\n",
2011                                          slave, port);
2012                                return err;
2013                        }
2014                        mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
2015                                 (int)(vp_oper->state.default_vlan),
2016                                 vp_oper->vlan_idx, slave, port);
2017                }
2018                if (vp_admin->spoofchk) {
2019                        vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
2020                                                               port,
2021                                                               vp_admin->mac);
2022                        if (0 > vp_oper->mac_idx) {
2023                                err = vp_oper->mac_idx;
2024                                vp_oper->mac_idx = NO_INDX;
2025                                mlx4_warn(&priv->dev,
2026                                          "No mac resources slave %d, port %d\n",
2027                                          slave, port);
2028                                return err;
2029                        }
2030                        mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
2031                                 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
2032                }
2033        }
2034        return 0;
2035}
2036
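/* Undo mlx4_master_activate_admin_state(): mark SMI disabled and release any
 * VLAN/MAC that was registered for the slave's active ports.
 */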
2037static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
2038{
2039        int port;
2040        struct mlx4_vport_oper_state *vp_oper;
2041        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2042                        &priv->dev, slave);
2043        int min_port = find_first_bit(actv_ports.ports,
2044                                      priv->dev.caps.num_ports) + 1;
2045        int max_port = min_port - 1 +
2046                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2047
2048
2049        for (port = min_port; port <= max_port; port++) {
2050                if (!test_bit(port - 1, actv_ports.ports))
2051                        continue;
2052                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2053                        MLX4_VF_SMI_DISABLED;
2054                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2055                if (NO_INDX != vp_oper->vlan_idx) {
2056                        __mlx4_unregister_vlan(&priv->dev,
2057                                               port, vp_oper->state.default_vlan);
2058                        vp_oper->vlan_idx = NO_INDX;
2059                }
2060                if (NO_INDX != vp_oper->mac_idx) {
2061                        __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
2062                        vp_oper->mac_idx = NO_INDX;
2063                }
2064        }
2065        return;
2066}
2067
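/* Handle one command arriving on a slave's comm channel.  Bit 31 of the
 * channel word is a toggle that must alternate on every command; bits 23:16
 * carry the command and bits 15:0 its parameter.  The VHCR0/1/2/VHCR_EN
 * sequence assembles the slave's 64-bit vhcr_dma address 16 bits at a time,
 * after which VHCR_POST triggers mlx4_master_process_vhcr().  Any protocol
 * violation resets the slave.
 */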
2068static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
2069                               u16 param, u8 toggle)
2070{
2071        struct mlx4_priv *priv = mlx4_priv(dev);
2072        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2073        u32 reply;
2074        u8 is_going_down = 0;
2075        int i;
2076        unsigned long flags;
2077
2078        slave_state[slave].comm_toggle ^= 1;
2079        reply = (u32) slave_state[slave].comm_toggle << 31;
2080        if (toggle != slave_state[slave].comm_toggle) {
2081                mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
2082                          toggle, slave);
2083                goto reset_slave;
2084        }
2085        if (cmd == MLX4_COMM_CMD_RESET) {
2086                mlx4_warn(dev, "Received reset from slave:%d\n", slave);
2087                slave_state[slave].active = false;
2088                slave_state[slave].old_vlan_api = false;
2089                mlx4_master_deactivate_admin_state(priv, slave);
2090                for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
2091                        slave_state[slave].event_eq[i].eqn = -1;
2092                        slave_state[slave].event_eq[i].token = 0;
2093                }
2094                /* Check if we are in the middle of the FLR process;
2095                 * if so, return "retry" status to the slave. */
2096                if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
2097                        goto inform_slave_state;
2098
2099                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
2100
2101                /* write the version in the event field */
2102                reply |= mlx4_comm_get_version();
2103
2104                goto reset_slave;
2105        }
2106        /* Command from a slave in the middle of FLR */
2107        if (cmd != MLX4_COMM_CMD_RESET &&
2108            MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
2109                mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
2110                          slave, cmd);
2111                return;
2112        }
2113
2114        switch (cmd) {
2115        case MLX4_COMM_CMD_VHCR0:
2116                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
2117                        goto reset_slave;
2118                slave_state[slave].vhcr_dma = ((u64) param) << 48;
2119                priv->mfunc.master.slave_state[slave].cookie = 0;
2120                break;
2121        case MLX4_COMM_CMD_VHCR1:
2122                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
2123                        goto reset_slave;
2124                slave_state[slave].vhcr_dma |= ((u64) param) << 32;
2125                break;
2126        case MLX4_COMM_CMD_VHCR2:
2127                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
2128                        goto reset_slave;
2129                slave_state[slave].vhcr_dma |= ((u64) param) << 16;
2130                break;
2131        case MLX4_COMM_CMD_VHCR_EN:
2132                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
2133                        goto reset_slave;
2134                slave_state[slave].vhcr_dma |= param;
2135                if (mlx4_master_activate_admin_state(priv, slave))
2136                        goto reset_slave;
2137                slave_state[slave].active = true;
2138                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
2139                break;
2140        case MLX4_COMM_CMD_VHCR_POST:
2141                if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
2142                    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2143                        mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2144                                  slave, cmd, slave_state[slave].last_cmd);
2145                        goto reset_slave;
2146                }
2147
2148                mutex_lock(&priv->cmd.slave_cmd_mutex);
2149                if (mlx4_master_process_vhcr(dev, slave, NULL)) {
2150                        mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
2151                                 slave);
2152                        mutex_unlock(&priv->cmd.slave_cmd_mutex);
2153                        goto reset_slave;
2154                }
2155                mutex_unlock(&priv->cmd.slave_cmd_mutex);
2156                break;
2157        default:
2158                mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
2159                goto reset_slave;
2160        }
2161        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2162        if (!slave_state[slave].is_slave_going_down)
2163                slave_state[slave].last_cmd = cmd;
2164        else
2165                is_going_down = 1;
2166        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2167        if (is_going_down) {
2168                mlx4_warn(dev, "Slave is going down, aborting command (%d) from slave:%d\n",
2169                          cmd, slave);
2170                return;
2171        }
2172        __raw_writel((__force u32) cpu_to_be32(reply),
2173                     &priv->mfunc.comm[slave].slave_read);
2174        mmiowb();
2175
2176        return;
2177
2178reset_slave:
2179        /* cleanup any slave resources */
2180        if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2181                mlx4_delete_all_resources_for_slave(dev, slave);
2182
2183        if (cmd != MLX4_COMM_CMD_RESET) {
2184                mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2185                          slave, cmd);
2186                /* Turn on internal error, letting the slave reset itself immediately;
2187                 * otherwise it might wait until the command times out
2188                 */
2189                reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2190        }
2191
2192        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2193        if (!slave_state[slave].is_slave_going_down)
2194                slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2195        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2196        /* With the slave in the middle of FLR, there is no need to clean resources again. */
2197inform_slave_state:
2198        memset(&slave_state[slave].event_eq, 0,
2199               sizeof(struct mlx4_slave_event_eq_info));
2200        __raw_writel((__force u32) cpu_to_be32(reply),
2201                     &priv->mfunc.comm[slave].slave_read);
2202        wmb();
2203}
2204
2205/* Master command processing: dispatch pending slave commands and re-arm the comm channel */
2206void mlx4_master_comm_channel(struct work_struct *work)
2207{
2208        struct mlx4_mfunc_master_ctx *master =
2209                container_of(work,
2210                             struct mlx4_mfunc_master_ctx,
2211                             comm_work);
2212        struct mlx4_mfunc *mfunc =
2213                container_of(master, struct mlx4_mfunc, master);
2214        struct mlx4_priv *priv =
2215                container_of(mfunc, struct mlx4_priv, mfunc);
2216        struct mlx4_dev *dev = &priv->dev;
2217        __be32 *bit_vec;
2218        u32 comm_cmd;
2219        u32 vec;
2220        int i, j, slave;
2221        int toggle;
2222        int served = 0;
2223        int reported = 0;
2224        u32 slt;
2225
2226        bit_vec = master->comm_arm_bit_vector;
2227        for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2228                vec = be32_to_cpu(bit_vec[i]);
2229                for (j = 0; j < 32; j++) {
2230                        if (!(vec & (1 << j)))
2231                                continue;
2232                        ++reported;
2233                        slave = (i * 32) + j;
2234                        comm_cmd = swab32(readl(
2235                                          &mfunc->comm[slave].slave_write));
2236                        slt = swab32(readl(&mfunc->comm[slave].slave_read))
2237                                     >> 31;
2238                        toggle = comm_cmd >> 31;
2239                        if (toggle != slt) {
2240                                if (master->slave_state[slave].comm_toggle
2241                                    != slt) {
2242                                        pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resyncing.\n",
2243                                                slave, slt,
2244                                                master->slave_state[slave].comm_toggle);
2245                                        master->slave_state[slave].comm_toggle =
2246                                                slt;
2247                                }
2248                                mlx4_master_do_cmd(dev, slave,
2249                                                   comm_cmd >> 16 & 0xff,
2250                                                   comm_cmd & 0xffff, toggle);
2251                                ++served;
2252                        }
2253                }
2254        }
2255
2256        if (reported && reported != served)
2257                mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
2258                          reported, served);
2259
2260        if (mlx4_ARM_COMM_CHANNEL(dev))
2261                mlx4_warn(dev, "Failed to arm comm channel events\n");
2262}
2263
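/* Slave side: align the locally tracked comm_toggle with the value currently
 * latched in the channel by reading back slave_write/slave_read.  A reading
 * of 0xffffffff means PCI reads are failing, so keep retrying for a while;
 * if the toggles never converge, assume a misbehaving previous user and
 * reset the channel to a known state.
 */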
2264static int sync_toggles(struct mlx4_dev *dev)
2265{
2266        struct mlx4_priv *priv = mlx4_priv(dev);
2267        u32 wr_toggle;
2268        u32 rd_toggle;
2269        unsigned long end;
2270
2271        wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
2272        if (wr_toggle == 0xffffffff)
2273                end = jiffies + msecs_to_jiffies(30000);
2274        else
2275                end = jiffies + msecs_to_jiffies(5000);
2276
2277        while (time_before(jiffies, end)) {
2278                rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2279                if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2280                        /* PCI might be offline */
2281                        msleep(100);
2282                        wr_toggle = swab32(readl(&priv->mfunc.comm->
2283                                           slave_write));
2284                        continue;
2285                }
2286
2287                if (rd_toggle >> 31 == wr_toggle >> 31) {
2288                        priv->cmd.comm_toggle = rd_toggle >> 31;
2289                        return 0;
2290                }
2291
2292                cond_resched();
2293        }
2294
2295        /*
2296         * we could reach here if for example the previous VM using this
2297         * function misbehaved and left the channel with unsynced state. We
2298         * should fix this here and give this VM a chance to use a properly
2299         * synced channel
2300         */
2301        mlx4_warn(dev, "recovering from a previously misbehaved VM\n");
2302        __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2303        __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2304        priv->cmd.comm_toggle = 0;
2305
2306        return 0;
2307}
2308
2309int mlx4_multi_func_init(struct mlx4_dev *dev)
2310{
2311        struct mlx4_priv *priv = mlx4_priv(dev);
2312        struct mlx4_slave_state *s_state;
2313        int i, j, err, port;
2314
2315        if (mlx4_is_master(dev))
2316                priv->mfunc.comm =
2317                ioremap(pci_resource_start(dev->persist->pdev,
2318                                           priv->fw.comm_bar) +
2319                        priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2320        else
2321                priv->mfunc.comm =
2322                ioremap(pci_resource_start(dev->persist->pdev, 2) +
2323                        MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2324        if (!priv->mfunc.comm) {
2325                mlx4_err(dev, "Couldn't map communication vector\n");
2326                goto err_vhcr;
2327        }
2328
2329        if (mlx4_is_master(dev)) {
2330                struct mlx4_vf_oper_state *vf_oper;
2331                struct mlx4_vf_admin_state *vf_admin;
2332
2333                priv->mfunc.master.slave_state =
2334                        kzalloc(dev->num_slaves *
2335                                sizeof(struct mlx4_slave_state), GFP_KERNEL);
2336                if (!priv->mfunc.master.slave_state)
2337                        goto err_comm;
2338
2339                priv->mfunc.master.vf_admin =
2340                        kzalloc(dev->num_slaves *
2341                                sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2342                if (!priv->mfunc.master.vf_admin)
2343                        goto err_comm_admin;
2344
2345                priv->mfunc.master.vf_oper =
2346                        kzalloc(dev->num_slaves *
2347                                sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2348                if (!priv->mfunc.master.vf_oper)
2349                        goto err_comm_oper;
2350
2351                for (i = 0; i < dev->num_slaves; ++i) {
2352                        vf_admin = &priv->mfunc.master.vf_admin[i];
2353                        vf_oper = &priv->mfunc.master.vf_oper[i];
2354                        s_state = &priv->mfunc.master.slave_state[i];
2355                        s_state->last_cmd = MLX4_COMM_CMD_RESET;
2356                        mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2357                        for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2358                                s_state->event_eq[j].eqn = -1;
2359                        __raw_writel((__force u32) 0,
2360                                     &priv->mfunc.comm[i].slave_write);
2361                        __raw_writel((__force u32) 0,
2362                                     &priv->mfunc.comm[i].slave_read);
2363                        mmiowb();
2364                        for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2365                                struct mlx4_vport_state *admin_vport;
2366                                struct mlx4_vport_state *oper_vport;
2367
2368                                s_state->vlan_filter[port] =
2369                                        kzalloc(sizeof(struct mlx4_vlan_fltr),
2370                                                GFP_KERNEL);
2371                                if (!s_state->vlan_filter[port]) {
2372                                        if (--port)
2373                                                kfree(s_state->vlan_filter[port]);
2374                                        goto err_slaves;
2375                                }
2376
2377                                admin_vport = &vf_admin->vport[port];
2378                                oper_vport = &vf_oper->vport[port].state;
2379                                INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2380                                admin_vport->default_vlan = MLX4_VGT;
2381                                oper_vport->default_vlan = MLX4_VGT;
2382                                admin_vport->qos_vport =
2383                                                MLX4_VPP_DEFAULT_VPORT;
2384                                oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
2385                                vf_oper->vport[port].vlan_idx = NO_INDX;
2386                                vf_oper->vport[port].mac_idx = NO_INDX;
2387                                mlx4_set_random_admin_guid(dev, i, port);
2388                        }
2389                        spin_lock_init(&s_state->lock);
2390                }
2391
2392                if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
2393                        for (port = 1; port <= dev->caps.num_ports; port++) {
2394                                if (mlx4_is_eth(dev, port)) {
2395                                        mlx4_set_default_port_qos(dev, port);
2396                                        mlx4_allocate_port_vpps(dev, port);
2397                                }
2398                        }
2399                }
2400
2401                memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
2402                priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2403                INIT_WORK(&priv->mfunc.master.comm_work,
2404                          mlx4_master_comm_channel);
2405                INIT_WORK(&priv->mfunc.master.slave_event_work,
2406                          mlx4_gen_slave_eqe);
2407                INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2408                          mlx4_master_handle_slave_flr);
2409                spin_lock_init(&priv->mfunc.master.slave_state_lock);
2410                spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2411                priv->mfunc.master.comm_wq =
2412                        create_singlethread_workqueue("mlx4_comm");
2413                if (!priv->mfunc.master.comm_wq)
2414                        goto err_slaves;
2415
2416                if (mlx4_init_resource_tracker(dev))
2417                        goto err_thread;
2418
2419        } else {
2420                err = sync_toggles(dev);
2421                if (err) {
2422                        mlx4_err(dev, "Couldn't sync toggles\n");
2423                        goto err_comm;
2424                }
2425        }
2426        return 0;
2427
2428err_thread:
2429        flush_workqueue(priv->mfunc.master.comm_wq);
2430        destroy_workqueue(priv->mfunc.master.comm_wq);
2431err_slaves:
2432        while (i--) {
2433                for (port = 1; port <= MLX4_MAX_PORTS; port++)
2434                        kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2435        }
2436        kfree(priv->mfunc.master.vf_oper);
2437err_comm_oper:
2438        kfree(priv->mfunc.master.vf_admin);
2439err_comm_admin:
2440        kfree(priv->mfunc.master.slave_state);
2441err_comm:
2442        iounmap(priv->mfunc.comm);
2443err_vhcr:
2444        dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2445                          priv->mfunc.vhcr,
2446                          priv->mfunc.vhcr_dma);
2447        priv->mfunc.vhcr = NULL;
2448        return -ENOMEM;
2449}
2450
2451int mlx4_cmd_init(struct mlx4_dev *dev)
2452{
2453        struct mlx4_priv *priv = mlx4_priv(dev);
2454        int flags = 0;
2455
2456        if (!priv->cmd.initialized) {
2457                mutex_init(&priv->cmd.slave_cmd_mutex);
2458                sema_init(&priv->cmd.poll_sem, 1);
2459                priv->cmd.use_events = 0;
2460                priv->cmd.toggle     = 1;
2461                priv->cmd.initialized = 1;
2462                flags |= MLX4_CMD_CLEANUP_STRUCT;
2463        }
2464
2465        if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2466                priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2467                                        0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2468                if (!priv->cmd.hcr) {
2469                        mlx4_err(dev, "Couldn't map command register\n");
2470                        goto err;
2471                }
2472                flags |= MLX4_CMD_CLEANUP_HCR;
2473        }
2474
2475        if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2476                priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2477                                                      PAGE_SIZE,
2478                                                      &priv->mfunc.vhcr_dma,
2479                                                      GFP_KERNEL);
2480                if (!priv->mfunc.vhcr)
2481                        goto err;
2482
2483                flags |= MLX4_CMD_CLEANUP_VHCR;
2484        }
2485
2486        if (!priv->cmd.pool) {
2487                priv->cmd.pool = pci_pool_create("mlx4_cmd",
2488                                                 dev->persist->pdev,
2489                                                 MLX4_MAILBOX_SIZE,
2490                                                 MLX4_MAILBOX_SIZE, 0);
2491                if (!priv->cmd.pool)
2492                        goto err;
2493
2494                flags |= MLX4_CMD_CLEANUP_POOL;
2495        }
2496
2497        return 0;
2498
2499err:
2500        mlx4_cmd_cleanup(dev, flags);
2501        return -ENOMEM;
2502}
2503
2504void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2505{
2506        struct mlx4_priv *priv = mlx4_priv(dev);
2507        int slave;
2508        u32 slave_read;
2509
2510        /* Report an internal error event to all
2511         * communication channels.
2512         */
2513        for (slave = 0; slave < dev->num_slaves; slave++) {
2514                slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2515                slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2516                __raw_writel((__force u32)cpu_to_be32(slave_read),
2517                             &priv->mfunc.comm[slave].slave_read);
2518                /* Make sure that our comm channel write doesn't
2519                 * get mixed in with writes from another CPU.
2520                 */
2521                mmiowb();
2522        }
2523}
2524
2525void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2526{
2527        struct mlx4_priv *priv = mlx4_priv(dev);
2528        int i, port;
2529
2530        if (mlx4_is_master(dev)) {
2531                flush_workqueue(priv->mfunc.master.comm_wq);
2532                destroy_workqueue(priv->mfunc.master.comm_wq);
2533                for (i = 0; i < dev->num_slaves; i++) {
2534                        for (port = 1; port <= MLX4_MAX_PORTS; port++)
2535                                kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2536                }
2537                kfree(priv->mfunc.master.slave_state);
2538                kfree(priv->mfunc.master.vf_admin);
2539                kfree(priv->mfunc.master.vf_oper);
2540                dev->num_slaves = 0;
2541        }
2542
2543        iounmap(priv->mfunc.comm);
2544}
2545
2546void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2547{
2548        struct mlx4_priv *priv = mlx4_priv(dev);
2549
2550        if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2551                pci_pool_destroy(priv->cmd.pool);
2552                priv->cmd.pool = NULL;
2553        }
2554
2555        if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2556            (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
2557                iounmap(priv->cmd.hcr);
2558                priv->cmd.hcr = NULL;
2559        }
2560        if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2561            (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2562                dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2563                                  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2564                priv->mfunc.vhcr = NULL;
2565        }
2566        if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2567                priv->cmd.initialized = 0;
2568}
2569
2570/*
2571 * Switch to using events to issue FW commands (can only be called
2572 * after event queue for command events has been initialized).
2573 */
2574int mlx4_cmd_use_events(struct mlx4_dev *dev)
2575{
2576        struct mlx4_priv *priv = mlx4_priv(dev);
2577        int i;
2578        int err = 0;
2579
2580        priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2581                                   sizeof (struct mlx4_cmd_context),
2582                                   GFP_KERNEL);
2583        if (!priv->cmd.context)
2584                return -ENOMEM;
2585
2586        for (i = 0; i < priv->cmd.max_cmds; ++i) {
2587                priv->cmd.context[i].token = i;
2588                priv->cmd.context[i].next  = i + 1;
2589                /* To support fatal error flow, initialize all
2590                 * cmd contexts to allow simulating completions
2591                 * with complete() at any time.
2592                 */
2593                init_completion(&priv->cmd.context[i].done);
2594        }
2595
2596        priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2597        priv->cmd.free_head = 0;
2598
2599        sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2600        spin_lock_init(&priv->cmd.context_lock);
2601
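        /* Round token_mask up to one less than the smallest power of two that
         * is >= max_cmds; e.g. (illustrative) max_cmds = 10 gives 0xf.
         */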
2602        for (priv->cmd.token_mask = 1;
2603             priv->cmd.token_mask < priv->cmd.max_cmds;
2604             priv->cmd.token_mask <<= 1)
2605                ; /* nothing */
2606        --priv->cmd.token_mask;
2607
2608        down(&priv->cmd.poll_sem);
2609        priv->cmd.use_events = 1;
2610
2611        return err;
2612}
2613
2614/*
2615 * Switch back to polling (used when shutting down the device)
2616 */
2617void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2618{
2619        struct mlx4_priv *priv = mlx4_priv(dev);
2620        int i;
2621
2622        priv->cmd.use_events = 0;
2623
2624        for (i = 0; i < priv->cmd.max_cmds; ++i)
2625                down(&priv->cmd.event_sem);
2626
2627        kfree(priv->cmd.context);
2628
2629        up(&priv->cmd.poll_sem);
2630}
2631
2632struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2633{
2634        struct mlx4_cmd_mailbox *mailbox;
2635
2636        mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2637        if (!mailbox)
2638                return ERR_PTR(-ENOMEM);
2639
2640        mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2641                                      &mailbox->dma);
2642        if (!mailbox->buf) {
2643                kfree(mailbox);
2644                return ERR_PTR(-ENOMEM);
2645        }
2646
2647        memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2648
2649        return mailbox;
2650}
2651EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2652
2653void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2654                           struct mlx4_cmd_mailbox *mailbox)
2655{
2656        if (!mailbox)
2657                return;
2658
2659        pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2660        kfree(mailbox);
2661}
2662EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
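/*
 * [Editorial sketch, not part of the upstream file] Typical life cycle of a
 * command mailbox, mirroring mlx4_get_counter_stats() further down in this
 * file.  The opcode is only an example; the function name is hypothetical.
 */
static int example_query_adapter(struct mlx4_dev *dev)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        /* mailbox->buf is a zeroed MLX4_MAILBOX_SIZE buffer; mailbox->dma is
         * its bus address, handed to the FW as the command outbox.
         */
        err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
                           MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
        if (!err) {
                /* parse mailbox->buf here */
        }

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}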
2663
2664u32 mlx4_comm_get_version(void)
2665{
2666        return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2667}
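/*
 * [Editorial note] The packed value above splits back into its two byte-wide
 * fields like so:
 *
 *      u32 ver      = mlx4_comm_get_version();
 *      u8  chan_ver = ver & 0xff;              (CMD_CHAN_VER)
 *      u8  if_rev   = (ver >> 8) & 0xff;       (CMD_CHAN_IF_REV)
 */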
2668
2669static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2670{
2671        if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2672                mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2673                         vf, dev->persist->num_vfs);
2674                return -EINVAL;
2675        }
2676
2677        return vf + 1;
2678}
2679
2680int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2681{
2682        if (slave < 1 || slave > dev->persist->num_vfs) {
2683                mlx4_err(dev,
2684                         "Bad slave number:%d (number of activated slaves: %lu)\n",
2685                         slave, dev->num_slaves);
2686                return -EINVAL;
2687        }
2688        return slave - 1;
2689}
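/*
 * [Editorial note] Slave 0 is the PF, so VF n maps to slave n + 1 and the two
 * helpers above are inverses of each other for valid input:
 *
 *      slave = mlx4_get_slave_indx(dev, 2);    returns 3 (given >= 3 VFs)
 *      vf    = mlx4_get_vf_indx(dev, 3);       returns 2
 */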
2690
2691void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2692{
2693        struct mlx4_priv *priv = mlx4_priv(dev);
2694        struct mlx4_cmd_context *context;
2695        int i;
2696
2697        spin_lock(&priv->cmd.context_lock);
2698        if (priv->cmd.context) {
2699                for (i = 0; i < priv->cmd.max_cmds; ++i) {
2700                        context = &priv->cmd.context[i];
2701                        context->fw_status = CMD_STAT_INTERNAL_ERR;
2702                        context->result    =
2703                                mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2704                        complete(&context->done);
2705                }
2706        }
2707        spin_unlock(&priv->cmd.context_lock);
2708}
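/*
 * [Editorial note] This is the fatal-error counterpart of the per-context
 * init_completion() done in mlx4_cmd_use_events(): every possible command
 * context is completed with CMD_STAT_INTERNAL_ERR so that no caller sleeping
 * in the event-mode command path is left hanging once the HW stops
 * generating completion EQEs.
 */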
2709
2710struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2711{
2712        struct mlx4_active_ports actv_ports;
2713        int vf;
2714
2715        bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2716
2717        if (slave == 0) {
2718                bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2719                return actv_ports;
2720        }
2721
2722        vf = mlx4_get_vf_indx(dev, slave);
2723        if (vf < 0)
2724                return actv_ports;
2725
2726        bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2727                   min((int)dev->dev_vfs[vf].n_ports,
2728                   dev->caps.num_ports));
2729
2730        return actv_ports;
2731}
2732EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
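/*
 * [Editorial sketch, not part of the upstream file] The returned structure is
 * just a port bitmap, so callers can walk it with the regular bitmap
 * iterators.  The function name below is hypothetical.
 */
static void example_walk_active_ports(struct mlx4_dev *dev, int slave)
{
        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
        unsigned int bit;

        /* Bits are 0-based while mlx4 port numbers are 1-based. */
        for_each_set_bit(bit, actv_ports.ports, dev->caps.num_ports)
                mlx4_dbg(dev, "slave %d may use port %u\n", slave, bit + 1);
}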
2733
2734int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2735{
2736        unsigned n;
2737        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2738        unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2739
2740        if (port <= 0 || port > m)
2741                return -EINVAL;
2742
2743        n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2744        if (port <= n)
2745                port = n + 1;
2746
2747        return port;
2748}
2749EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2750
2751int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2752{
2753        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2754        if (test_bit(port - 1, actv_ports.ports))
2755                return port -
2756                        find_first_bit(actv_ports.ports, dev->caps.num_ports);
2757
2758        return -1;
2759}
2760EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
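/*
 * [Editorial note] mlx4_slave_convert_port() and mlx4_phys_to_slave_port()
 * translate in opposite directions: for a slave whose only active port is
 * physical port 2, slave port 1 converts to physical port 2, and physical
 * port 2 converts back to slave port 1 (ports the slave does not own
 * yield -1).
 */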
2761
2762struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2763                                                   int port)
2764{
2765        unsigned i;
2766        struct mlx4_slaves_pport slaves_pport;
2767
2768        bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2769
2770        if (port <= 0 || port > dev->caps.num_ports)
2771                return slaves_pport;
2772
2773        for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2774                struct mlx4_active_ports actv_ports =
2775                        mlx4_get_active_ports(dev, i);
2776                if (test_bit(port - 1, actv_ports.ports))
2777                        set_bit(i, slaves_pport.slaves);
2778        }
2779
2780        return slaves_pport;
2781}
2782EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2783
2784struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2785                struct mlx4_dev *dev,
2786                const struct mlx4_active_ports *crit_ports)
2787{
2788        unsigned i;
2789        struct mlx4_slaves_pport slaves_pport;
2790
2791        bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2792
2793        for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2794                struct mlx4_active_ports actv_ports =
2795                        mlx4_get_active_ports(dev, i);
2796                if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2797                                 dev->caps.num_ports))
2798                        set_bit(i, slaves_pport.slaves);
2799        }
2800
2801        return slaves_pport;
2802}
2803EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2804
2805static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2806{
2807        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2808        int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2809                        + 1;
2810        int max_port = min_port +
2811                bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2812
2813        if (port < min_port)
2814                port = min_port;
2815        else if (port >= max_port)
2816                port = max_port - 1;
2817
2818        return port;
2819}
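/*
 * [Editorial note] This clamps an admin-supplied port number into the slave's
 * active range; e.g. for a VF whose only active port is physical port 2, any
 * other port number is silently mapped to 2.
 */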
2820
2821static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
2822                              int max_tx_rate)
2823{
2824        int i;
2825        int err;
2826        struct mlx4_qos_manager *port_qos;
2827        struct mlx4_dev *dev = &priv->dev;
2828        struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
2829
2830        port_qos = &priv->mfunc.master.qos_ctl[port];
2831        memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
2832
2833        if (slave > port_qos->num_of_qos_vfs) {
2834                mlx4_info(dev, "No availible VPP resources for this VF\n");
2835                return -EINVAL;
2836        }
2837
2838        /* The default QoS values must first be queried from Vport 0 */
2839        err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
2840        if (err) {
2841                mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
2842                return err;
2843        }
2844
2845        for (i = 0; i < MLX4_NUM_UP; i++) {
2846                if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
2847                        vpp_qos[i].max_avg_bw = max_tx_rate;
2848                        vpp_qos[i].enable = 1;
2849                } else {
2850                        /* A user-supplied tx_rate of 0 means no rate limit
2851                         * is required, so leave max_avg_bw at the value
2852                         * queried from Vport 0.
2853                         */
2854                        vpp_qos[i].enable = 0;
2855                }
2856        }
2857
2858        err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
2859        if (err) {
2860                mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
2861                return err;
2862        }
2863
2864        return 0;
2865}
2866
2867static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
2868                                        struct mlx4_vport_state *vf_admin)
2869{
2870        struct mlx4_qos_manager *info;
2871        struct mlx4_priv *priv = mlx4_priv(dev);
2872
2873        if (!mlx4_is_master(dev) ||
2874            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2875                return false;
2876
2877        info = &priv->mfunc.master.qos_ctl[port];
2878
2879        if (vf_admin->default_vlan != MLX4_VGT &&
2880            test_bit(vf_admin->default_qos, info->priority_bm))
2881                return true;
2882
2883        return false;
2884}
2885
2886static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
2887                                       struct mlx4_vport_state *vf_admin,
2888                                       int vlan, int qos)
2889{
2890        struct mlx4_vport_state dummy_admin = {0};
2891
2892        if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
2893            !vf_admin->tx_rate)
2894                return true;
2895
2896        dummy_admin.default_qos = qos;
2897        dummy_admin.default_vlan = vlan;
2898
2899        /* The VF wants to move to another VST state that is valid with the
2900         * current rate limit: either a different default vlan in VST or
2901         * another supported QoS priority.  Otherwise we don't allow the
2902         * change while the TX rate is still configured.
2903         */
2904        if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
2905                return true;
2906
2907        mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
2908                  (vlan == MLX4_VGT) ? "VGT" : "VST");
2909
2910        if (vlan != MLX4_VGT)
2911                mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
2912
2913        mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
2914
2915        return false;
2916}
2917
2918int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2919{
2920        struct mlx4_priv *priv = mlx4_priv(dev);
2921        struct mlx4_vport_state *s_info;
2922        int slave;
2923
2924        if (!mlx4_is_master(dev))
2925                return -EPROTONOSUPPORT;
2926
2927        slave = mlx4_get_slave_indx(dev, vf);
2928        if (slave < 0)
2929                return -EINVAL;
2930
2931        port = mlx4_slaves_closest_port(dev, slave, port);
2932        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2933        s_info->mac = mac;
2934        mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
2935                  vf, port, s_info->mac);
2936        return 0;
2937}
2938EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
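/*
 * [Editorial sketch, not part of the upstream file] mlx4_set_vf_mac() takes
 * the MAC as a host-order u64 with the first address byte in the most
 * significant position, matching the unpacking in mlx4_get_vf_config()
 * below.  A caller holding a u8 mac[ETH_ALEN] could build that value by hand
 * as shown (the driver normally uses the mlx4_mac_to_u64() helper for this);
 * the function name is hypothetical.
 */
static int example_set_vf_mac(struct mlx4_dev *dev, int port, int vf,
                              const u8 *addr)
{
        u64 mac = 0;
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                mac = (mac << 8) | addr[i];

        return mlx4_set_vf_mac(dev, port, vf, mac);
}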
2939
2940
2941int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2942{
2943        struct mlx4_priv *priv = mlx4_priv(dev);
2944        struct mlx4_vport_state *vf_admin;
2945        int slave;
2946
2947        if ((!mlx4_is_master(dev)) ||
2948            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2949                return -EPROTONOSUPPORT;
2950
2951        if ((vlan > 4095) || (qos > 7))
2952                return -EINVAL;
2953
2954        slave = mlx4_get_slave_indx(dev, vf);
2955        if (slave < 0)
2956                return -EINVAL;
2957
2958        port = mlx4_slaves_closest_port(dev, slave, port);
2959        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2960
2961        if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
2962                return -EPERM;
2963
2964        if (vlan == 0 && qos == 0)
2965                vf_admin->default_vlan = MLX4_VGT;
2966        else
2967                vf_admin->default_vlan = vlan;
2968        vf_admin->default_qos = qos;
2969
2970        /* If a rate was configured prior to VST, we saved it in
2971         * vf_admin->tx_rate; now, if the priority is supported, enforce the QoS.
2972         */
2973        if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
2974            vf_admin->tx_rate)
2975                vf_admin->qos_vport = slave;
2976
2977        if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2978                mlx4_info(dev,
2979                          "updating vf %d port %d config will take effect on next VF restart\n",
2980                          vf, port);
2981        return 0;
2982}
2983EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2984
2985int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
2986                     int max_tx_rate)
2987{
2988        int err;
2989        int slave;
2990        struct mlx4_vport_state *vf_admin;
2991        struct mlx4_priv *priv = mlx4_priv(dev);
2992
2993        if (!mlx4_is_master(dev) ||
2994            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2995                return -EPROTONOSUPPORT;
2996
2997        if (min_tx_rate) {
2998                mlx4_info(dev, "Minimum BW share not supported\n");
2999                return -EPROTONOSUPPORT;
3000        }
3001
3002        slave = mlx4_get_slave_indx(dev, vf);
3003        if (slave < 0)
3004                return -EINVAL;
3005
3006        port = mlx4_slaves_closest_port(dev, slave, port);
3007        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3008
3009        err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
3010        if (err) {
3011                mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
3012                          max_tx_rate);
3013                return err;
3014        }
3015
3016        vf_admin->tx_rate = max_tx_rate;
3017        /* if VF is not in supported mode (VST with supported prio),
3018         * we do not change vport configuration for its QPs, but save
3019         * the rate, so it will be enforced when it moves to supported
3020         * mode next time.
3021         */
3022        if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
3023                mlx4_info(dev,
3024                          "rate set for VF %d when not in valid state\n", vf);
3025
3026                if (vf_admin->default_vlan != MLX4_VGT)
3027                        mlx4_info(dev, "VST priority not supported by QoS\n");
3028                else
3029                        mlx4_info(dev, "VF in VGT mode (needed VST)\n");
3030
3031                mlx4_info(dev,
3032                          "rate %d take affect when VF moves to valid state\n",
3033                          max_tx_rate);
3034                return 0;
3035        }
3036
3037        /* If the user sets the rate to 0, assign the default vport to its QPs */
3038        vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
3039
3040        if (priv->mfunc.master.slave_state[slave].active &&
3041            dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
3042                mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
3043
3044        return 0;
3045}
3046EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
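/*
 * [Editorial sketch, not part of the upstream file] As the code above
 * explains, the limit is only enforced immediately when the VF is already in
 * VST mode on a QoS-enabled priority; otherwise it is merely stored.  A
 * typical admin sequence therefore configures VST first.  The function name
 * is hypothetical, and the vlan/priority values are examples that assume
 * priority 0 is QoS-enabled on this port.
 */
static int example_limit_vf_rate(struct mlx4_dev *dev, int port, int vf)
{
        int err;

        err = mlx4_set_vf_vlan(dev, port, vf, 100, 0);  /* VST, vlan 100, prio 0 */
        if (err)
                return err;

        /* min_tx_rate is not supported and must be 0. */
        return mlx4_set_vf_rate(dev, port, vf, 0, 1000);
}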
3047
3048/* mlx4_get_slave_default_vlan -
3049 * Return true if the slave is in VST mode (i.e. has a default vlan).
3050 * If so, also report the vlan and qos through any non-NULL pointers.
3051 */
3052bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
3053                                 u16 *vlan, u8 *qos)
3054{
3055        struct mlx4_vport_oper_state *vp_oper;
3056        struct mlx4_priv *priv;
3057
3058        priv = mlx4_priv(dev);
3059        port = mlx4_slaves_closest_port(dev, slave, port);
3060        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3061
3062        if (MLX4_VGT != vp_oper->state.default_vlan) {
3063                if (vlan)
3064                        *vlan = vp_oper->state.default_vlan;
3065                if (qos)
3066                        *qos = vp_oper->state.default_qos;
3067                return true;
3068        }
3069        return false;
3070}
3071EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
3072
3073int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
3074{
3075        struct mlx4_priv *priv = mlx4_priv(dev);
3076        struct mlx4_vport_state *s_info;
3077        int slave;
3078
3079        if ((!mlx4_is_master(dev)) ||
3080            !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
3081                return -EPROTONOSUPPORT;
3082
3083        slave = mlx4_get_slave_indx(dev, vf);
3084        if (slave < 0)
3085                return -EINVAL;
3086
3087        port = mlx4_slaves_closest_port(dev, slave, port);
3088        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3089        s_info->spoofchk = setting;
3090
3091        return 0;
3092}
3093EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
3094
3095int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
3096{
3097        struct mlx4_priv *priv = mlx4_priv(dev);
3098        struct mlx4_vport_state *s_info;
3099        int slave;
3100
3101        if (!mlx4_is_master(dev))
3102                return -EPROTONOSUPPORT;
3103
3104        slave = mlx4_get_slave_indx(dev, vf);
3105        if (slave < 0)
3106                return -EINVAL;
3107
3108        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3109        ivf->vf = vf;
3110
3111        /* this u64-to-bytes MAC unpacking should be factored out into a helper */
3112        ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
3113        ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
3114        ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
3115        ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
3116        ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
3117        ivf->mac[5] = ((s_info->mac)  & 0xff);
3118
3119        ivf->vlan               = s_info->default_vlan;
3120        ivf->qos                = s_info->default_qos;
3121
3122        if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
3123                ivf->max_tx_rate = s_info->tx_rate;
3124        else
3125                ivf->max_tx_rate = 0;
3126
3127        ivf->min_tx_rate        = 0;
3128        ivf->spoofchk           = s_info->spoofchk;
3129        ivf->linkstate          = s_info->link_state;
3130
3131        return 0;
3132}
3133EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
3134
3135int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
3136{
3137        struct mlx4_priv *priv = mlx4_priv(dev);
3138        struct mlx4_vport_state *s_info;
3139        int slave;
3140        u8 link_stat_event;
3141
3142        slave = mlx4_get_slave_indx(dev, vf);
3143        if (slave < 0)
3144                return -EINVAL;
3145
3146        port = mlx4_slaves_closest_port(dev, slave, port);
3147        switch (link_state) {
3148        case IFLA_VF_LINK_STATE_AUTO:
3149                /* get current link state */
3150                if (!priv->sense.do_sense_port[port])
3151                        link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
3152                else
3153                        link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
3154                break;
3155
3156        case IFLA_VF_LINK_STATE_ENABLE:
3157                link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
3158                break;
3159
3160        case IFLA_VF_LINK_STATE_DISABLE:
3161                link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
3162                break;
3163
3164        default:
3165                mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
3166                          link_state, slave, port);
3167                return -EINVAL;
3168        }
3169        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3170        s_info->link_state = link_state;
3171
3172        /* send event */
3173        mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
3174
3175        if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3176                mlx4_dbg(dev,
3177                         "updating vf %d port %d no link state HW enforcment\n",
3178                         vf, port);
3179        return 0;
3180}
3181EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
3182
3183int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
3184                           struct mlx4_counter *counter_stats, int reset)
3185{
3186        struct mlx4_cmd_mailbox *mailbox = NULL;
3187        struct mlx4_counter *tmp_counter;
3188        int err;
3189        u32 if_stat_in_mod;
3190
3191        if (!counter_stats)
3192                return -EINVAL;
3193
3194        if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
3195                return 0;
3196
3197        mailbox = mlx4_alloc_cmd_mailbox(dev);
3198        if (IS_ERR(mailbox))
3199                return PTR_ERR(mailbox);
3200
3201        memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
3202        if_stat_in_mod = counter_index;
3203        if (reset)
3204                if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
3205        err = mlx4_cmd_box(dev, 0, mailbox->dma,
3206                           if_stat_in_mod, 0,
3207                           MLX4_CMD_QUERY_IF_STAT,
3208                           MLX4_CMD_TIME_CLASS_C,
3209                           MLX4_CMD_NATIVE);
3210        if (err) {
3211                mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
3212                         __func__, counter_index);
3213                goto if_stat_out;
3214        }
3215        tmp_counter = (struct mlx4_counter *)mailbox->buf;
3216        counter_stats->counter_mode = tmp_counter->counter_mode;
3217        if (counter_stats->counter_mode == 0) {
3218                counter_stats->rx_frames =
3219                        cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
3220                                    be64_to_cpu(tmp_counter->rx_frames));
3221                counter_stats->tx_frames =
3222                        cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
3223                                    be64_to_cpu(tmp_counter->tx_frames));
3224                counter_stats->rx_bytes =
3225                        cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
3226                                    be64_to_cpu(tmp_counter->rx_bytes));
3227                counter_stats->tx_bytes =
3228                        cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
3229                                    be64_to_cpu(tmp_counter->tx_bytes));
3230        }
3231
3232if_stat_out:
3233        mlx4_free_cmd_mailbox(dev, mailbox);
3234
3235        return err;
3236}
3237EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
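/*
 * [Editorial sketch, not part of the upstream file] Reading a counter; note
 * that the struct mlx4_counter fields stay big-endian and are accumulated on
 * top of whatever the caller already has in *counter_stats, so the buffer
 * must be zeroed first.  The function name is hypothetical.
 */
static void example_dump_counter(struct mlx4_dev *dev, int counter_index)
{
        struct mlx4_counter stats;

        memset(&stats, 0, sizeof(stats));
        if (!mlx4_get_counter_stats(dev, counter_index, &stats, 0 /* no reset */))
                mlx4_dbg(dev, "counter %d: rx %llu frames, tx %llu frames\n",
                         counter_index, be64_to_cpu(stats.rx_frames),
                         be64_to_cpu(stats.tx_frames));
}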
3238
3239int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
3240                      struct ifla_vf_stats *vf_stats)
3241{
3242        struct mlx4_counter tmp_vf_stats;
3243        int slave;
3244        int err = 0;
3245
3246        if (!vf_stats)
3247                return -EINVAL;
3248
3249        if (!mlx4_is_master(dev))
3250                return -EPROTONOSUPPORT;
3251
3252        slave = mlx4_get_slave_indx(dev, vf_idx);
3253        if (slave < 0)
3254                return -EINVAL;
3255
3256        port = mlx4_slaves_closest_port(dev, slave, port);
3257        err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
3258        if (!err && tmp_vf_stats.counter_mode == 0) {
3259                vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
3260                vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
3261                vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
3262                vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
3263        }
3264
3265        return err;
3266}
3267EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
3268
3269int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
3270{
3271        struct mlx4_priv *priv = mlx4_priv(dev);
3272
3273        if (slave < 1 || slave >= dev->num_slaves ||
3274            port < 1 || port > MLX4_MAX_PORTS)
3275                return 0;
3276
3277        return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
3278                MLX4_VF_SMI_ENABLED;
3279}
3280EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
3281
3282int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
3283{
3284        struct mlx4_priv *priv = mlx4_priv(dev);
3285
3286        if (slave == mlx4_master_func_num(dev))
3287                return 1;
3288
3289        if (slave < 1 || slave >= dev->num_slaves ||
3290            port < 1 || port > MLX4_MAX_PORTS)
3291                return 0;
3292
3293        return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
3294                MLX4_VF_SMI_ENABLED;
3295}
3296EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
3297
3298int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
3299                                 int enabled)
3300{
3301        struct mlx4_priv *priv = mlx4_priv(dev);
3302        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
3303                        &priv->dev, slave);
3304        int min_port = find_first_bit(actv_ports.ports,
3305                                      priv->dev.caps.num_ports) + 1;
3306        int max_port = min_port - 1 +
3307                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
3308
3309        if (slave == mlx4_master_func_num(dev))
3310                return 0;
3311
3312        if (slave < 1 || slave >= dev->num_slaves ||
3313            port < 1 || port > MLX4_MAX_PORTS ||
3314            enabled < 0 || enabled > 1)
3315                return -EINVAL;
3316
3317        if (min_port == max_port && dev->caps.num_ports > 1) {
3318                mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
3319                return -EPROTONOSUPPORT;
3320        }
3321
3322        priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
3323        return 0;
3324}
3325EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
3326