linux/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
#include "lib/eq.h"

enum {
        CMD_IF_REV = 5,
};

enum {
        CMD_MODE_POLLING,
        CMD_MODE_EVENTS
};

enum {
        MLX5_CMD_DELIVERY_STAT_OK                       = 0x0,
        MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR               = 0x1,
        MLX5_CMD_DELIVERY_STAT_TOK_ERR                  = 0x2,
        MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR          = 0x3,
        MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR        = 0x4,
        MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR         = 0x5,
        MLX5_CMD_DELIVERY_STAT_FW_ERR                   = 0x6,
        MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR            = 0x7,
        MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR           = 0x8,
        MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR      = 0x9,
        MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR            = 0x10,
};

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
                                           struct mlx5_cmd_msg *in,
                                           struct mlx5_cmd_msg *out,
                                           void *uout, int uout_size,
                                           mlx5_cmd_cbk_t cbk,
                                           void *context, int page_queue)
{
        gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
        struct mlx5_cmd_work_ent *ent;

        ent = kzalloc(sizeof(*ent), alloc_flags);
        if (!ent)
                return ERR_PTR(-ENOMEM);

        ent->in         = in;
        ent->out        = out;
        ent->uout       = uout;
        ent->uout_size  = uout_size;
        ent->callback   = cbk;
        ent->context    = context;
        ent->cmd        = cmd;
        ent->page_queue = page_queue;

        return ent;
}

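/*
 * Tokens are 8-bit tags copied into the command layout and into every
 * mailbox block of a message; firmware appears to check them on delivery
 * (see MLX5_CMD_DELIVERY_STAT_TOK_ERR), so stale or corrupted descriptors
 * can be detected. The counter wraps around and skips the value 0.
 */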
static u8 alloc_token(struct mlx5_cmd *cmd)
{
        u8 token;

        spin_lock(&cmd->token_lock);
        cmd->token++;
        if (cmd->token == 0)
                cmd->token++;
        token = cmd->token;
        spin_unlock(&cmd->token_lock);

        return token;
}

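/*
 * Command slots are tracked in a bitmask where a set bit means "free".
 * alloc_ent() claims the lowest free slot and free_ent() releases it; the
 * slot at index max_reg_cmds is reserved for the page queue (see
 * cmd_work_handler()).
 */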
static int alloc_ent(struct mlx5_cmd *cmd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cmd->alloc_lock, flags);
        ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
        if (ret < cmd->max_reg_cmds)
                clear_bit(ret, &cmd->bitmask);
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);

        return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->alloc_lock, flags);
        set_bit(idx, &cmd->bitmask);
        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
        return cmd->cmd_buf + (idx << cmd->log_stride);
}

static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
        int size = msg->len;
        int blen = size - min_t(int, sizeof(msg->first.data), size);

        return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}

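/*
 * Command integrity is protected by 8-bit XOR signatures: each signature
 * byte is chosen so that XOR-ing the covered region, signature included,
 * yields 0xff. calc_block_sig()/set_signature() stamp a command before it
 * is posted; verify_block_sig()/verify_signature() check the result on
 * completion when checksumming is enabled.
 */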
static u8 xor8_buf(void *buf, size_t offset, int len)
{
        u8 *ptr = buf;
        u8 sum = 0;
        int i;
        int end = len + offset;

        for (i = offset; i < end; i++)
                sum ^= ptr[i];

        return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
        int xor_len = sizeof(*block) - sizeof(block->data) - 1;

        if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
                return -EINVAL;

        if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
                return -EINVAL;

        return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
        int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

        block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
        block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
        struct mlx5_cmd_mailbox *next = msg->next;
        int n = mlx5_calc_cmd_blocks(msg);
        int i = 0;

        for (i = 0; i < n && next; i++)  {
                calc_block_sig(next->buf);
                next = next->next;
        }
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
        ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
        if (csum) {
                calc_chain_sig(ent->in);
                calc_chain_sig(ent->out);
        }
}

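/*
 * In polling mode, busy-wait (with cond_resched()) for firmware to clear
 * the HW ownership bit in the command layout, giving up one second after
 * the nominal command timeout.
 */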
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
        unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
        u8 own;

        do {
                own = READ_ONCE(ent->lay->status_own);
                if (!(own & CMD_OWNER_HW)) {
                        ent->ret = 0;
                        return;
                }
                cond_resched();
        } while (time_before(jiffies, poll_end));

        ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
        kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
        struct mlx5_cmd_mailbox *next = ent->out->next;
        int n = mlx5_calc_cmd_blocks(ent->out);
        int err;
        u8 sig;
        int i = 0;

        sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
        if (sig != 0xff)
                return -EINVAL;

        for (i = 0; i < n && next; i++) {
                err = verify_block_sig(next->buf);
                if (err)
                        return err;

                next = next->next;
        }

        return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
        __be32 *p = buf;
        int i;

        for (i = 0; i < size; i += 16) {
                pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
                         be32_to_cpu(p[1]), be32_to_cpu(p[2]),
                         be32_to_cpu(p[3]));
                p += 4;
                offset += 16;
        }
        if (!data_only)
                pr_debug("\n");
}

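/*
 * When the device is in an internal error state, commands are completed by
 * the driver instead of firmware. Teardown/destroy/dealloc-style commands
 * are reported as successful so resource cleanup can make progress;
 * everything else fails with a driver-generated syndrome.
 */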
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
                                       u32 *synd, u8 *status)
{
        *synd = 0;
        *status = 0;

        switch (op) {
        case MLX5_CMD_OP_TEARDOWN_HCA:
        case MLX5_CMD_OP_DISABLE_HCA:
        case MLX5_CMD_OP_MANAGE_PAGES:
        case MLX5_CMD_OP_DESTROY_MKEY:
        case MLX5_CMD_OP_DESTROY_EQ:
        case MLX5_CMD_OP_DESTROY_CQ:
        case MLX5_CMD_OP_DESTROY_QP:
        case MLX5_CMD_OP_DESTROY_PSV:
        case MLX5_CMD_OP_DESTROY_SRQ:
        case MLX5_CMD_OP_DESTROY_XRC_SRQ:
        case MLX5_CMD_OP_DESTROY_XRQ:
        case MLX5_CMD_OP_DESTROY_DCT:
        case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
        case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
        case MLX5_CMD_OP_DEALLOC_PD:
        case MLX5_CMD_OP_DEALLOC_UAR:
        case MLX5_CMD_OP_DETACH_FROM_MCG:
        case MLX5_CMD_OP_DEALLOC_XRCD:
        case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
        case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_DESTROY_LAG:
        case MLX5_CMD_OP_DESTROY_VPORT_LAG:
        case MLX5_CMD_OP_DESTROY_TIR:
        case MLX5_CMD_OP_DESTROY_SQ:
        case MLX5_CMD_OP_DESTROY_RQ:
        case MLX5_CMD_OP_DESTROY_RMP:
        case MLX5_CMD_OP_DESTROY_TIS:
        case MLX5_CMD_OP_DESTROY_RQT:
        case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
        case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
        case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_2ERR_QP:
        case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
        case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT:
        case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT:
        case MLX5_CMD_OP_FPGA_DESTROY_QP:
        case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
        case MLX5_CMD_OP_DEALLOC_MEMIC:
        case MLX5_CMD_OP_PAGE_FAULT_RESUME:
        case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
                return MLX5_CMD_STAT_OK;

        case MLX5_CMD_OP_QUERY_HCA_CAP:
        case MLX5_CMD_OP_QUERY_ADAPTER:
        case MLX5_CMD_OP_INIT_HCA:
        case MLX5_CMD_OP_ENABLE_HCA:
        case MLX5_CMD_OP_QUERY_PAGES:
        case MLX5_CMD_OP_SET_HCA_CAP:
        case MLX5_CMD_OP_QUERY_ISSI:
        case MLX5_CMD_OP_SET_ISSI:
        case MLX5_CMD_OP_CREATE_MKEY:
        case MLX5_CMD_OP_QUERY_MKEY:
        case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
        case MLX5_CMD_OP_CREATE_EQ:
        case MLX5_CMD_OP_QUERY_EQ:
        case MLX5_CMD_OP_GEN_EQE:
        case MLX5_CMD_OP_CREATE_CQ:
        case MLX5_CMD_OP_QUERY_CQ:
        case MLX5_CMD_OP_MODIFY_CQ:
        case MLX5_CMD_OP_CREATE_QP:
        case MLX5_CMD_OP_RST2INIT_QP:
        case MLX5_CMD_OP_INIT2RTR_QP:
        case MLX5_CMD_OP_RTR2RTS_QP:
        case MLX5_CMD_OP_RTS2RTS_QP:
        case MLX5_CMD_OP_SQERR2RTS_QP:
        case MLX5_CMD_OP_QUERY_QP:
        case MLX5_CMD_OP_SQD_RTS_QP:
        case MLX5_CMD_OP_INIT2INIT_QP:
        case MLX5_CMD_OP_CREATE_PSV:
        case MLX5_CMD_OP_CREATE_SRQ:
        case MLX5_CMD_OP_QUERY_SRQ:
        case MLX5_CMD_OP_ARM_RQ:
        case MLX5_CMD_OP_CREATE_XRC_SRQ:
        case MLX5_CMD_OP_QUERY_XRC_SRQ:
        case MLX5_CMD_OP_ARM_XRC_SRQ:
        case MLX5_CMD_OP_CREATE_XRQ:
        case MLX5_CMD_OP_QUERY_XRQ:
        case MLX5_CMD_OP_ARM_XRQ:
        case MLX5_CMD_OP_CREATE_DCT:
        case MLX5_CMD_OP_DRAIN_DCT:
        case MLX5_CMD_OP_QUERY_DCT:
        case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
        case MLX5_CMD_OP_QUERY_VPORT_STATE:
        case MLX5_CMD_OP_MODIFY_VPORT_STATE:
        case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
        case MLX5_CMD_OP_SET_ROCE_ADDRESS:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
        case MLX5_CMD_OP_QUERY_VNIC_ENV:
        case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
        case MLX5_CMD_OP_ALLOC_Q_COUNTER:
        case MLX5_CMD_OP_QUERY_Q_COUNTER:
        case MLX5_CMD_OP_SET_MONITOR_COUNTER:
        case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
        case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
        case MLX5_CMD_OP_QUERY_RATE_LIMIT:
        case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
        case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
        case MLX5_CMD_OP_ALLOC_PD:
        case MLX5_CMD_OP_ALLOC_UAR:
        case MLX5_CMD_OP_CONFIG_INT_MODERATION:
        case MLX5_CMD_OP_ACCESS_REG:
        case MLX5_CMD_OP_ATTACH_TO_MCG:
        case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
        case MLX5_CMD_OP_MAD_IFC:
        case MLX5_CMD_OP_QUERY_MAD_DEMUX:
        case MLX5_CMD_OP_SET_MAD_DEMUX:
        case MLX5_CMD_OP_NOP:
        case MLX5_CMD_OP_ALLOC_XRCD:
        case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
        case MLX5_CMD_OP_QUERY_CONG_STATUS:
        case MLX5_CMD_OP_MODIFY_CONG_STATUS:
        case MLX5_CMD_OP_QUERY_CONG_PARAMS:
        case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
        case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
        case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
        case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
        case MLX5_CMD_OP_CREATE_LAG:
        case MLX5_CMD_OP_MODIFY_LAG:
        case MLX5_CMD_OP_QUERY_LAG:
        case MLX5_CMD_OP_CREATE_VPORT_LAG:
        case MLX5_CMD_OP_CREATE_TIR:
        case MLX5_CMD_OP_MODIFY_TIR:
        case MLX5_CMD_OP_QUERY_TIR:
        case MLX5_CMD_OP_CREATE_SQ:
        case MLX5_CMD_OP_MODIFY_SQ:
        case MLX5_CMD_OP_QUERY_SQ:
        case MLX5_CMD_OP_CREATE_RQ:
        case MLX5_CMD_OP_MODIFY_RQ:
        case MLX5_CMD_OP_QUERY_RQ:
        case MLX5_CMD_OP_CREATE_RMP:
        case MLX5_CMD_OP_MODIFY_RMP:
        case MLX5_CMD_OP_QUERY_RMP:
        case MLX5_CMD_OP_CREATE_TIS:
        case MLX5_CMD_OP_MODIFY_TIS:
        case MLX5_CMD_OP_QUERY_TIS:
        case MLX5_CMD_OP_CREATE_RQT:
        case MLX5_CMD_OP_MODIFY_RQT:
        case MLX5_CMD_OP_QUERY_RQT:

        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
        case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
        case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
        case MLX5_CMD_OP_FPGA_CREATE_QP:
        case MLX5_CMD_OP_FPGA_MODIFY_QP:
        case MLX5_CMD_OP_FPGA_QUERY_QP:
        case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS:
        case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
        case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
        case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
        case MLX5_CMD_OP_CREATE_UCTX:
        case MLX5_CMD_OP_DESTROY_UCTX:
        case MLX5_CMD_OP_CREATE_UMEM:
        case MLX5_CMD_OP_DESTROY_UMEM:
        case MLX5_CMD_OP_ALLOC_MEMIC:
                *status = MLX5_DRIVER_STATUS_ABORTED;
                *synd = MLX5_DRIVER_SYND;
                return -EIO;
        default:
                mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
                return -EINVAL;
        }
}

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

        switch (command) {
        MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
        MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
        MLX5_COMMAND_STR_CASE(INIT_HCA);
        MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
        MLX5_COMMAND_STR_CASE(ENABLE_HCA);
        MLX5_COMMAND_STR_CASE(DISABLE_HCA);
        MLX5_COMMAND_STR_CASE(QUERY_PAGES);
        MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
        MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
        MLX5_COMMAND_STR_CASE(QUERY_ISSI);
        MLX5_COMMAND_STR_CASE(SET_ISSI);
        MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION);
        MLX5_COMMAND_STR_CASE(CREATE_MKEY);
        MLX5_COMMAND_STR_CASE(QUERY_MKEY);
        MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
        MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
        MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
        MLX5_COMMAND_STR_CASE(CREATE_EQ);
        MLX5_COMMAND_STR_CASE(DESTROY_EQ);
        MLX5_COMMAND_STR_CASE(QUERY_EQ);
        MLX5_COMMAND_STR_CASE(GEN_EQE);
        MLX5_COMMAND_STR_CASE(CREATE_CQ);
        MLX5_COMMAND_STR_CASE(DESTROY_CQ);
        MLX5_COMMAND_STR_CASE(QUERY_CQ);
        MLX5_COMMAND_STR_CASE(MODIFY_CQ);
        MLX5_COMMAND_STR_CASE(CREATE_QP);
        MLX5_COMMAND_STR_CASE(DESTROY_QP);
        MLX5_COMMAND_STR_CASE(RST2INIT_QP);
        MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
        MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
        MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
        MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
        MLX5_COMMAND_STR_CASE(2ERR_QP);
        MLX5_COMMAND_STR_CASE(2RST_QP);
        MLX5_COMMAND_STR_CASE(QUERY_QP);
        MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
        MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
        MLX5_COMMAND_STR_CASE(CREATE_PSV);
        MLX5_COMMAND_STR_CASE(DESTROY_PSV);
        MLX5_COMMAND_STR_CASE(CREATE_SRQ);
        MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
        MLX5_COMMAND_STR_CASE(QUERY_SRQ);
        MLX5_COMMAND_STR_CASE(ARM_RQ);
        MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
        MLX5_COMMAND_STR_CASE(CREATE_DCT);
        MLX5_COMMAND_STR_CASE(DESTROY_DCT);
        MLX5_COMMAND_STR_CASE(DRAIN_DCT);
        MLX5_COMMAND_STR_CASE(QUERY_DCT);
        MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
        MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
        MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
        MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
        MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
        MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
        MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
        MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
        MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
        MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
        MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
        MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
        MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
        MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
        MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
        MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
        MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
        MLX5_COMMAND_STR_CASE(ALLOC_PD);
        MLX5_COMMAND_STR_CASE(DEALLOC_PD);
        MLX5_COMMAND_STR_CASE(ALLOC_UAR);
        MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
        MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
        MLX5_COMMAND_STR_CASE(ACCESS_REG);
        MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
        MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
        MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
        MLX5_COMMAND_STR_CASE(MAD_IFC);
        MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
        MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
        MLX5_COMMAND_STR_CASE(NOP);
        MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
        MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
        MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
        MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
        MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
        MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
        MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
        MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
        MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
        MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
        MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
        MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
        MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
        MLX5_COMMAND_STR_CASE(CREATE_LAG);
        MLX5_COMMAND_STR_CASE(MODIFY_LAG);
        MLX5_COMMAND_STR_CASE(QUERY_LAG);
        MLX5_COMMAND_STR_CASE(DESTROY_LAG);
        MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
        MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
        MLX5_COMMAND_STR_CASE(CREATE_TIR);
        MLX5_COMMAND_STR_CASE(MODIFY_TIR);
        MLX5_COMMAND_STR_CASE(DESTROY_TIR);
        MLX5_COMMAND_STR_CASE(QUERY_TIR);
        MLX5_COMMAND_STR_CASE(CREATE_SQ);
        MLX5_COMMAND_STR_CASE(MODIFY_SQ);
        MLX5_COMMAND_STR_CASE(DESTROY_SQ);
        MLX5_COMMAND_STR_CASE(QUERY_SQ);
        MLX5_COMMAND_STR_CASE(CREATE_RQ);
        MLX5_COMMAND_STR_CASE(MODIFY_RQ);
        MLX5_COMMAND_STR_CASE(DESTROY_RQ);
        MLX5_COMMAND_STR_CASE(QUERY_RQ);
        MLX5_COMMAND_STR_CASE(CREATE_RMP);
        MLX5_COMMAND_STR_CASE(MODIFY_RMP);
        MLX5_COMMAND_STR_CASE(DESTROY_RMP);
        MLX5_COMMAND_STR_CASE(QUERY_RMP);
        MLX5_COMMAND_STR_CASE(CREATE_TIS);
        MLX5_COMMAND_STR_CASE(MODIFY_TIS);
        MLX5_COMMAND_STR_CASE(DESTROY_TIS);
        MLX5_COMMAND_STR_CASE(QUERY_TIS);
        MLX5_COMMAND_STR_CASE(CREATE_RQT);
        MLX5_COMMAND_STR_CASE(MODIFY_RQT);
        MLX5_COMMAND_STR_CASE(DESTROY_RQT);
        MLX5_COMMAND_STR_CASE(QUERY_RQT);
        MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
        MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
        MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
        MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
        MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
        MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
        MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT);
        MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT);
        MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT);
        MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT);
        MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP);
        MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP);
        MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP);
        MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS);
        MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP);
        MLX5_COMMAND_STR_CASE(CREATE_XRQ);
        MLX5_COMMAND_STR_CASE(DESTROY_XRQ);
        MLX5_COMMAND_STR_CASE(QUERY_XRQ);
        MLX5_COMMAND_STR_CASE(ARM_XRQ);
        MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT);
        MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT);
        MLX5_COMMAND_STR_CASE(ALLOC_MEMIC);
        MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC);
        MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS);
        MLX5_COMMAND_STR_CASE(CREATE_UCTX);
        MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
        MLX5_COMMAND_STR_CASE(CREATE_UMEM);
        MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
        default: return "unknown command opcode";
        }
}

static const char *cmd_status_str(u8 status)
{
        switch (status) {
        case MLX5_CMD_STAT_OK:
                return "OK";
        case MLX5_CMD_STAT_INT_ERR:
                return "internal error";
        case MLX5_CMD_STAT_BAD_OP_ERR:
                return "bad operation";
        case MLX5_CMD_STAT_BAD_PARAM_ERR:
                return "bad parameter";
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
                return "bad system state";
        case MLX5_CMD_STAT_BAD_RES_ERR:
                return "bad resource";
        case MLX5_CMD_STAT_RES_BUSY:
                return "resource busy";
        case MLX5_CMD_STAT_LIM_ERR:
                return "limits exceeded";
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
                return "bad resource state";
        case MLX5_CMD_STAT_IX_ERR:
                return "bad index";
        case MLX5_CMD_STAT_NO_RES_ERR:
                return "no resources";
        case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
                return "bad input length";
        case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
                return "bad output length";
        case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
                return "bad QP state";
        case MLX5_CMD_STAT_BAD_PKT_ERR:
                return "bad packet (discarded)";
        case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
                return "bad size too many outstanding CQEs";
        default:
                return "unknown status";
        }
}

static int cmd_status_to_err(u8 status)
{
        switch (status) {
        case MLX5_CMD_STAT_OK:                          return 0;
        case MLX5_CMD_STAT_INT_ERR:                     return -EIO;
        case MLX5_CMD_STAT_BAD_OP_ERR:                  return -EINVAL;
        case MLX5_CMD_STAT_BAD_PARAM_ERR:               return -EINVAL;
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:           return -EIO;
        case MLX5_CMD_STAT_BAD_RES_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_RES_BUSY:                    return -EBUSY;
        case MLX5_CMD_STAT_LIM_ERR:                     return -ENOMEM;
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:           return -EINVAL;
        case MLX5_CMD_STAT_IX_ERR:                      return -EINVAL;
        case MLX5_CMD_STAT_NO_RES_ERR:                  return -EAGAIN;
        case MLX5_CMD_STAT_BAD_INP_LEN_ERR:             return -EIO;
        case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:            return -EIO;
        case MLX5_CMD_STAT_BAD_QP_STATE_ERR:            return -EINVAL;
        case MLX5_CMD_STAT_BAD_PKT_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:      return -EINVAL;
        default:                                        return -EIO;
        }
}

struct mlx5_ifc_mbox_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];

        u8         syndrome[0x20];

        u8         reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
        u8         opcode[0x10];
        u8         uid[0x10];

        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];

        u8         reserved_at_40[0x40];
};

void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
        *status = MLX5_GET(mbox_out, out, status);
        *syndrome = MLX5_GET(mbox_out, out, syndrome);
}

static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
        u32 syndrome;
        u8  status;
        u16 opcode;
        u16 op_mod;
        u16 uid;

        mlx5_cmd_mbox_status(out, &status, &syndrome);
        if (!status)
                return 0;

        opcode = MLX5_GET(mbox_in, in, opcode);
        op_mod = MLX5_GET(mbox_in, in, op_mod);
        uid    = MLX5_GET(mbox_in, in, uid);

        if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
                mlx5_core_err_rl(dev,
                        "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
                        mlx5_command_str(opcode), opcode, op_mod,
                        cmd_status_str(status), status, syndrome);
        else
                mlx5_core_dbg(dev,
                        "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
                        mlx5_command_str(opcode), opcode, op_mod,
                        cmd_status_str(status), status, syndrome);

        return cmd_status_to_err(status);
}

static void dump_command(struct mlx5_core_dev *dev,
                         struct mlx5_cmd_work_ent *ent, int input)
{
        struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
        u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
        struct mlx5_cmd_mailbox *next = msg->next;
        int n = mlx5_calc_cmd_blocks(msg);
        int data_only;
        u32 offset = 0;
        int dump_len;
        int i;

        data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

        if (data_only)
                mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
                                   "dump command data %s(0x%x) %s\n",
                                   mlx5_command_str(op), op,
                                   input ? "INPUT" : "OUTPUT");
        else
                mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
                              mlx5_command_str(op), op,
                              input ? "INPUT" : "OUTPUT");

        if (data_only) {
                if (input) {
                        dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
                        offset += sizeof(ent->lay->in);
                } else {
                        dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
                        offset += sizeof(ent->lay->out);
                }
        } else {
                dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
                offset += sizeof(*ent->lay);
        }

        for (i = 0; i < n && next; i++)  {
                if (data_only) {
                        dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
                        dump_buf(next->buf, dump_len, 1, offset);
                        offset += MLX5_CMD_DATA_BLOCK_SIZE;
                } else {
                        mlx5_core_dbg(dev, "command block:\n");
                        dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
                        offset += sizeof(struct mlx5_cmd_prot_block);
                }
                next = next->next;
        }

        if (data_only)
                pr_debug("\n");
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
        return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);

static void cb_timeout_handler(struct work_struct *work)
{
        struct delayed_work *dwork = container_of(work, struct delayed_work,
                                                  work);
        struct mlx5_cmd_work_ent *ent = container_of(dwork,
                                                     struct mlx5_cmd_work_ent,
                                                     cb_timeout_work);
        struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
                                                 cmd);

        ent->ret = -ETIMEDOUT;
        mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                       mlx5_command_str(msg_to_opcode(ent->in)),
                       msg_to_opcode(ent->in));
        mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                              struct mlx5_cmd_msg *msg);

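/*
 * Runs on the command workqueue (or synchronously for page-queue commands):
 * takes the relevant semaphore, claims a command slot, builds the command
 * layout (input/output mailbox pointers, lengths, token, signatures), then
 * hands ownership to firmware by ringing the doorbell. In polling mode it
 * also waits for and completes the command in place.
 */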
static void cmd_work_handler(struct work_struct *work)
{
        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
        struct mlx5_cmd *cmd = ent->cmd;
        struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
        unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
        struct mlx5_cmd_layout *lay;
        struct semaphore *sem;
        unsigned long flags;
        bool poll_cmd = ent->polling;
        int alloc_ret;
        int cmd_mode;

        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
        down(sem);
        if (!ent->page_queue) {
                alloc_ret = alloc_ent(cmd);
                if (alloc_ret < 0) {
                        mlx5_core_err(dev, "failed to allocate command entry\n");
                        if (ent->callback) {
                                ent->callback(-EAGAIN, ent->context);
                                mlx5_free_cmd_msg(dev, ent->out);
                                free_msg(dev, ent->in);
                                free_cmd(ent);
                        } else {
                                ent->ret = -EAGAIN;
                                complete(&ent->done);
                        }
                        up(sem);
                        return;
                }
                ent->idx = alloc_ret;
        } else {
                ent->idx = cmd->max_reg_cmds;
                spin_lock_irqsave(&cmd->alloc_lock, flags);
                clear_bit(ent->idx, &cmd->bitmask);
                spin_unlock_irqrestore(&cmd->alloc_lock, flags);
        }

        cmd->ent_arr[ent->idx] = ent;
        set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
        lay = get_inst(cmd, ent->idx);
        ent->lay = lay;
        memset(lay, 0, sizeof(*lay));
        memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
        ent->op = be32_to_cpu(lay->in[0]) >> 16;
        if (ent->in->next)
                lay->in_ptr = cpu_to_be64(ent->in->next->dma);
        lay->inlen = cpu_to_be32(ent->in->len);
        if (ent->out->next)
                lay->out_ptr = cpu_to_be64(ent->out->next->dma);
        lay->outlen = cpu_to_be32(ent->out->len);
        lay->type = MLX5_PCI_CMD_XPORT;
        lay->token = ent->token;
        lay->status_own = CMD_OWNER_HW;
        set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
        cmd_mode = cmd->mode;

        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

        /* Skip sending command to fw if internal error */
        if (pci_channel_offline(dev->pdev) ||
            dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                u8 status = 0;
                u32 drv_synd;

                ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
                MLX5_SET(mbox_out, ent->out, status, status);
                MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);

                mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
                return;
        }

        /* ring doorbell after the descriptor is valid */
        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
        /* if not in polling don't use ent after this point */
        if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
                mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT));
        }
}

static const char *deliv_status_to_str(u8 status)
{
        switch (status) {
        case MLX5_CMD_DELIVERY_STAT_OK:
                return "no errors";
        case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
                return "signature error";
        case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
                return "token error";
        case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
                return "bad block number";
        case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
                return "output pointer not aligned to block size";
        case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
                return "input pointer not aligned to block size";
        case MLX5_CMD_DELIVERY_STAT_FW_ERR:
                return "firmware internal error";
        case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
                return "command input length error";
        case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
                return "command output length error";
        case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
                return "reserved fields not cleared";
        case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
                return "bad command descriptor type";
        default:
                return "unknown status code";
        }
}

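/*
 * Wait for a synchronous command to complete. Event-mode commands time out
 * after MLX5_CMD_TIMEOUT_MSEC and are then force-completed; polled commands
 * wait without a timeout here, since poll_timeout() bounds the wait itself.
 */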
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
        unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
        struct mlx5_cmd *cmd = &dev->cmd;
        int err;

        if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
                wait_for_completion(&ent->done);
        } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
                ent->ret = -ETIMEDOUT;
                mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
        }

        err = ent->ret;

        if (err == -ETIMEDOUT) {
                mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                               mlx5_command_str(msg_to_opcode(ent->in)),
                               msg_to_opcode(ent->in));
        }
        mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
                      err, deliv_status_to_str(ent->status), ent->status);

        return err;
}

/*  Notes:
 *    1. Callback functions may not sleep
 *    2. Page queue commands do not support asynchronous completion
 */
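/*
 * Illustrative caller sketch (not part of this file): higher layers reach
 * this path through wrappers such as mlx5_cmd_exec(), building commands
 * with the mlx5_ifc accessors, e.g.:
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */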
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                           struct mlx5_cmd_msg *out, void *uout, int uout_size,
                           mlx5_cmd_cbk_t callback,
                           void *context, int page_queue, u8 *status,
                           u8 token, bool force_polling)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
        struct mlx5_cmd_stats *stats;
        int err = 0;
        s64 ds;
        u16 op;

        if (callback && page_queue)
                return -EINVAL;

        ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
                        page_queue);
        if (IS_ERR(ent))
                return PTR_ERR(ent);

        ent->token = token;
        ent->polling = force_polling;

        if (!callback)
                init_completion(&ent->done);

        INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
        INIT_WORK(&ent->work, cmd_work_handler);
        if (page_queue) {
                cmd_work_handler(&ent->work);
        } else if (!queue_work(cmd->wq, &ent->work)) {
                mlx5_core_warn(dev, "failed to queue work\n");
                err = -ENOMEM;
                goto out_free;
        }

        if (callback)
                goto out;

        err = wait_func(dev, ent);
        if (err == -ETIMEDOUT)
                goto out;

        ds = ent->ts2 - ent->ts1;
        op = MLX5_GET(mbox_in, in->first.data, opcode);
        if (op < ARRAY_SIZE(cmd->stats)) {
                stats = &cmd->stats[op];
                spin_lock_irq(&stats->lock);
                stats->sum += ds;
                ++stats->n;
                spin_unlock_irq(&stats->lock);
        }
        mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
                           "fw exec time for %s is %lld nsec\n",
                           mlx5_command_str(op), ds);
        *status = ent->status;

out_free:
        free_cmd(ent);
out:
        return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
                         size_t count, loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        char lbuf[3];
        int err;

        if (!dbg->in_msg || !dbg->out_msg)
                return -ENOMEM;

        if (count < sizeof(lbuf) - 1)
                return -EINVAL;

        if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
                return -EFAULT;

        lbuf[sizeof(lbuf) - 1] = 0;

        if (strcmp(lbuf, "go"))
                return -EINVAL;

        err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

        return err ? err : count;
}

static const struct file_operations fops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
                            u8 token)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_mailbox *next;
        int copy;

        if (!to || !from)
                return -ENOMEM;

        copy = min_t(int, size, sizeof(to->first.data));
        memcpy(to->first.data, from, copy);
        size -= copy;
        from += copy;

        next = to->next;
        while (size) {
                if (!next) {
                        /* this is a BUG */
                        return -ENOMEM;
                }

                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;
                memcpy(block->data, from, copy);
                from += copy;
                size -= copy;
                block->token = token;
                next = next->next;
        }

        return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_mailbox *next;
        int copy;

        if (!to || !from)
                return -ENOMEM;

        copy = min_t(int, size, sizeof(from->first.data));
        memcpy(to, from->first.data, copy);
        size -= copy;
        to += copy;

        next = from->next;
        while (size) {
                if (!next) {
                        /* this is a BUG */
                        return -ENOMEM;
                }

                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;

                memcpy(to, block->data, copy);
                to += copy;
                size -= copy;
                next = next->next;
        }

        return 0;
}

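/*
 * Mailboxes are fixed-size, DMA-coherent blocks carved out of the command
 * DMA pool; mailbox->dma is the bus address that firmware dereferences when
 * walking a message chain.
 */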
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
                                              gfp_t flags)
{
        struct mlx5_cmd_mailbox *mailbox;

        mailbox = kmalloc(sizeof(*mailbox), flags);
        if (!mailbox)
                return ERR_PTR(-ENOMEM);

        mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
                                       &mailbox->dma);
        if (!mailbox->buf) {
                mlx5_core_dbg(dev, "failed allocation\n");
                kfree(mailbox);
                return ERR_PTR(-ENOMEM);
        }
        mailbox->next = NULL;

        return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
                         struct mlx5_cmd_mailbox *mailbox)
{
        dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
        kfree(mailbox);
}

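/*
 * Messages larger than the inline area of the command layout are carried in
 * a chain of mailbox blocks. The chain is built back to front, so each
 * block's next pointer and block_num are known when it is linked in; the
 * head of the list ends up as block 0.
 */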
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
                                               gfp_t flags, int size,
                                               u8 token)
{
        struct mlx5_cmd_mailbox *tmp, *head = NULL;
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_msg *msg;
        int err;
        int n;
        int i;

        msg = kzalloc(sizeof(*msg), flags);
        if (!msg)
                return ERR_PTR(-ENOMEM);

        msg->len = size;
        n = mlx5_calc_cmd_blocks(msg);

        for (i = 0; i < n; i++) {
                tmp = alloc_cmd_box(dev, flags);
                if (IS_ERR(tmp)) {
                        mlx5_core_warn(dev, "failed allocating block\n");
                        err = PTR_ERR(tmp);
                        goto err_alloc;
                }

                block = tmp->buf;
                tmp->next = head;
                block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
                block->block_num = cpu_to_be32(n - i - 1);
                block->token = token;
                head = tmp;
        }
        msg->next = head;
        return msg;

err_alloc:
        while (head) {
                tmp = head->next;
                free_cmd_box(dev, head);
                head = tmp;
        }
        kfree(msg);

        return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                              struct mlx5_cmd_msg *msg)
{
        struct mlx5_cmd_mailbox *head = msg->next;
        struct mlx5_cmd_mailbox *next;

        while (head) {
                next = head->next;
                free_cmd_box(dev, head);
                head = next;
        }
        kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
                          size_t count, loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        void *ptr;

        if (*pos != 0)
                return -EINVAL;

        kfree(dbg->in_msg);
        dbg->in_msg = NULL;
        dbg->inlen = 0;
        ptr = memdup_user(buf, count);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
        dbg->in_msg = ptr;
        dbg->inlen = count;

        *pos = count;

        return count;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
                         loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

        if (!dbg->out_msg)
                return -ENOMEM;

        return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
                                       dbg->outlen);
}

static const struct file_operations dfops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = data_write,
        .read   = data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
                           loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        char outlen[8];
        int err;

        err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
        if (err < 0)
                return err;

        return simple_read_from_buffer(buf, count, pos, outlen, err);
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
                            size_t count, loff_t *pos)
{
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        char outlen_str[8] = {0};
        int outlen;
        void *ptr;
        int err;

        if (*pos != 0 || count > 6)
                return -EINVAL;

        kfree(dbg->out_msg);
        dbg->out_msg = NULL;
        dbg->outlen = 0;

        if (copy_from_user(outlen_str, buf, count))
                return -EFAULT;

        err = sscanf(outlen_str, "%d", &outlen);
        if (err < 0)
                return err;

        ptr = kzalloc(outlen, GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        dbg->out_msg = ptr;
        dbg->outlen = outlen;

        *pos = count;

        return count;
}

static const struct file_operations olfops = {
        .owner  = THIS_MODULE,
        .open   = simple_open,
        .write  = outlen_write,
        .read   = outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;

        snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
                 dev_name(dev->device));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

        if (!mlx5_debugfs_root)
                return;

        mlx5_cmdif_debugfs_cleanup(dev);
        debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
        int err = -ENOMEM;

        if (!mlx5_debugfs_root)
                return 0;

        dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
        if (!dbg->dbg_root)
                return err;

        dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
                                          dev, &dfops);
        if (!dbg->dbg_in)
                goto err_dbg;

        dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
                                           dev, &dfops);
        if (!dbg->dbg_out)
                goto err_dbg;

        dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
                                              dev, &olfops);
        if (!dbg->dbg_outlen)
                goto err_dbg;

        dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
                                            &dbg->status);
        if (!dbg->dbg_status)
                goto err_dbg;

        dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
        if (!dbg->dbg_run)
                goto err_dbg;

        mlx5_cmdif_debugfs_init(dev);

        return 0;

err_dbg:
        clean_debug_files(dev);
        return err;
}

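/*
 * Switching between polling and event completion modes: take every regular
 * command semaphore plus the page-queue semaphore to drain commands in
 * flight, flip the mode, then release them.
 */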
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        int i;

        for (i = 0; i < cmd->max_reg_cmds; i++)
                down(&cmd->sem);
        down(&cmd->pages_sem);

        cmd->mode = mode;

        up(&cmd->pages_sem);
        for (i = 0; i < cmd->max_reg_cmds; i++)
                up(&cmd->sem);
}

static int cmd_comp_notifier(struct notifier_block *nb,
                             unsigned long type, void *data)
{
        struct mlx5_core_dev *dev;
        struct mlx5_cmd *cmd;
        struct mlx5_eqe *eqe;

        cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
        dev = container_of(cmd, struct mlx5_core_dev, cmd);
        eqe = data;

        mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);

        return NOTIFY_OK;
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
        MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
        mlx5_eq_notifier_register(dev, &dev->cmd.nb);
        mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
        mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
        mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}

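/* Return a mailbox message to the cache it was drawn from, if any; messages
 * allocated outside the cache are freed outright.
 */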
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
        unsigned long flags;

        if (msg->parent) {
                spin_lock_irqsave(&msg->parent->lock, flags);
                list_add_tail(&msg->list, &msg->parent->head);
                spin_unlock_irqrestore(&msg->parent->lock, flags);
        } else {
                mlx5_free_cmd_msg(dev, msg);
        }
}

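/* Complete every command whose bit is set in @vec. Bits are set either by a
 * completion EQE from firmware or, when @forced is true, synthetically by
 * mlx5_cmd_trigger_completions() on an error path; forced completions must
 * not release the entry slot, since the real completion may still arrive.
 */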
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        struct mlx5_cmd_work_ent *ent;
        mlx5_cmd_cbk_t callback;
        void *context;
        int err;
        int i;
        s64 ds;
        struct mlx5_cmd_stats *stats;
        unsigned long flags;
        unsigned long vector;

        /* there can be at most 32 command queues */
        vector = vec & 0xffffffff;
        for (i = 0; i < (1 << cmd->log_sz); i++) {
                if (test_bit(i, &vector)) {
                        struct semaphore *sem;

                        ent = cmd->ent_arr[i];

                        /* if we already completed the command, ignore it */
                        if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
                                                &ent->state)) {
                                /* only real completion can free the cmd slot */
                                if (!forced) {
                                        mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
                                                      ent->idx);
                                        free_ent(cmd, ent->idx);
                                        free_cmd(ent);
                                }
                                continue;
                        }

                        if (ent->callback)
                                cancel_delayed_work(&ent->cb_timeout_work);
                        if (ent->page_queue)
                                sem = &cmd->pages_sem;
                        else
                                sem = &cmd->sem;
                        ent->ts2 = ktime_get_ns();
                        memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
                        dump_command(dev, ent, 0);
                        if (!ent->ret) {
                                if (!cmd->checksum_disabled)
                                        ent->ret = verify_signature(ent);
                                else
                                        ent->ret = 0;
                                if (vec & MLX5_TRIGGERED_CMD_COMP)
                                        ent->status = MLX5_DRIVER_STATUS_ABORTED;
                                else
                                        ent->status = ent->lay->status_own >> 1;

                                mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
                                              ent->ret, deliv_status_to_str(ent->status), ent->status);
                        }

                        /* only real completion will free the entry slot */
                        if (!forced)
                                free_ent(cmd, ent->idx);

                        if (ent->callback) {
                                ds = ent->ts2 - ent->ts1;
                                if (ent->op < ARRAY_SIZE(cmd->stats)) {
                                        stats = &cmd->stats[ent->op];
                                        spin_lock_irqsave(&stats->lock, flags);
                                        stats->sum += ds;
                                        ++stats->n;
                                        spin_unlock_irqrestore(&stats->lock, flags);
                                }

                                callback = ent->callback;
                                context = ent->context;
                                err = ent->ret;
                                if (!err) {
                                        err = mlx5_copy_from_msg(ent->uout,
                                                                 ent->out,
                                                                 ent->uout_size);

                                        err = err ? err : mlx5_cmd_check(dev,
                                                                         ent->in->first.data,
                                                                         ent->uout);
                                }

                                mlx5_free_cmd_msg(dev, ent->out);
                                free_msg(dev, ent->in);

                                err = err ? err : ent->status;
                                if (!forced)
                                        free_cmd(ent);
                                callback(err, context);
                        } else {
                                complete(&ent->done);
                        }
                        up(sem);
                }
        }
}

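/* Synthesize completions for every command still marked busy in the
 * allocation bitmask. Used on error/teardown paths where firmware can no
 * longer be expected to answer; MLX5_TRIGGERED_CMD_COMP flags the vector so
 * the handler reports the entries as aborted rather than completed.
 */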
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
        unsigned long flags;
        u64 vector;

        /* wait for pending handlers to complete */
        mlx5_eq_synchronize_cmd_irq(dev);
        spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
        vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
        if (!vector)
                goto no_trig;

        vector |= MLX5_TRIGGERED_CMD_COMP;
        spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

        mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
        mlx5_cmd_comp_handler(dev, vector, true);
        return;

no_trig:
        spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

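/* Drain the command interface: repeatedly trigger forced completions until
 * every semaphore slot can be taken, i.e. no command remains outstanding,
 * then release all slots so the cmdif is usable again.
 */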
void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        int i;

        for (i = 0; i < cmd->max_reg_cmds; i++)
                while (down_trylock(&cmd->sem))
                        mlx5_cmd_trigger_completions(dev);

        while (down_trylock(&cmd->pages_sem))
                mlx5_cmd_trigger_completions(dev);

        /* Unlock cmdif */
        up(&cmd->pages_sem);
        for (i = 0; i < cmd->max_reg_cmds; i++)
                up(&cmd->sem);
}

static int status_to_err(u8 status)
{
        switch (status) {
        case MLX5_CMD_DELIVERY_STAT_OK:
        case MLX5_DRIVER_STATUS_ABORTED:
                return 0;
        case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
        case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
                return -EBADR;
        case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
        case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
        case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
                return -EFAULT; /* Bad address */
        case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
        case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
        case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
        case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
                return -ENOMSG;
        case MLX5_CMD_DELIVERY_STAT_FW_ERR:
                return -EIO;
        default:
                return -EINVAL;
        }
}

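/* Grab an input mailbox from the smallest cache pool whose entries can hold
 * in_size bytes. Payloads of 16 bytes or less fit in the command entry's
 * inline data and skip the cache; cache exhaustion or oversized requests
 * fall back to a fresh allocation.
 */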
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
                                      gfp_t gfp)
{
        struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
        struct cmd_msg_cache *ch = NULL;
        struct mlx5_cmd *cmd = &dev->cmd;
        int i;

        if (in_size <= 16)
                goto cache_miss;

        for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
                ch = &cmd->cache[i];
                if (in_size > ch->max_inbox_size)
                        continue;
                spin_lock_irq(&ch->lock);
                if (list_empty(&ch->head)) {
                        spin_unlock_irq(&ch->lock);
                        continue;
                }
                msg = list_entry(ch->head.next, typeof(*msg), list);
                /* Cached messages keep their full buffer allocation, so
                 * record the actual request length explicitly.
                 */
                msg->len = in_size;
                list_del(&msg->list);
                spin_unlock_irq(&ch->lock);
                break;
        }

        if (!IS_ERR(msg))
                return msg;

cache_miss:
        msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
        return msg;
}

static int is_manage_pages(void *in)
{
        return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

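/* Common execution path for all command variants: copy the caller's inbox
 * into mailbox messages, invoke the command (polling, events or async
 * callback), and translate the delivery status into an errno. MANAGE_PAGES
 * is steered to the dedicated page-request slot so firmware page demands
 * cannot deadlock against a full set of regular command slots.
 */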
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                    int out_size, mlx5_cmd_cbk_t callback, void *context,
                    bool force_polling)
{
        struct mlx5_cmd_msg *inb;
        struct mlx5_cmd_msg *outb;
        int pages_queue;
        gfp_t gfp;
        int err;
        u8 status = 0;
        u32 drv_synd;
        u8 token;

        if (pci_channel_offline(dev->pdev) ||
            dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                u16 opcode = MLX5_GET(mbox_in, in, opcode);

                err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
                MLX5_SET(mbox_out, out, status, status);
                MLX5_SET(mbox_out, out, syndrome, drv_synd);
                return err;
        }

        pages_queue = is_manage_pages(in);
        gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

        inb = alloc_msg(dev, in_size, gfp);
        if (IS_ERR(inb)) {
                err = PTR_ERR(inb);
                return err;
        }

        token = alloc_token(&dev->cmd);

        err = mlx5_copy_to_msg(inb, in, in_size, token);
        if (err) {
                mlx5_core_warn(dev, "err %d\n", err);
                goto out_in;
        }

        outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
        if (IS_ERR(outb)) {
                err = PTR_ERR(outb);
                goto out_in;
        }

        err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
                              pages_queue, &status, token, force_polling);
        if (err)
                goto out_out;

        mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
        if (status) {
                err = status_to_err(status);
                goto out_out;
        }

        if (!callback)
                err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
        if (!callback)
                mlx5_free_cmd_msg(dev, outb);

out_in:
        if (!callback)
                free_msg(dev, inb);
        return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size)
{
        int err;

        err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
        return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
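
/* Illustrative sketch (not part of this file): callers typically build the
 * inbox/outbox with the MLX5_ST_SZ_DW()/MLX5_SET() mailbox macros, e.g. to
 * issue ENABLE_HCA:
 *
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *
 * A zero return means both delivery and firmware status were OK, since
 * mlx5_cmd_exec() folds mlx5_cmd_check() into its return value.
 */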

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
                             struct mlx5_async_ctx *ctx)
{
        ctx->dev = dev;
        /* Starts at 1 so that in-flight commands cannot reach zero and issue
         * a spurious wake_up before cleanup drops this initial reference.
         */
        atomic_set(&ctx->num_inflight, 1);
        init_waitqueue_head(&ctx->wait);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);

/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call to mlx5_cmd_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
        atomic_dec(&ctx->num_inflight);
        wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);

static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
        struct mlx5_async_work *work = _work;
        struct mlx5_async_ctx *ctx = work->ctx;

        work->user_callback(status, work);
        if (atomic_dec_and_test(&ctx->num_inflight))
                wake_up(&ctx->wait);
}

int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
                     void *out, int out_size, mlx5_async_cbk_t callback,
                     struct mlx5_async_work *work)
{
        int ret;

        work->ctx = ctx;
        work->user_callback = callback;
        if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
                return -EIO;
        ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
                       mlx5_cmd_exec_cb_handler, work, false);
        if (ret && atomic_dec_and_test(&ctx->num_inflight))
                wake_up(&ctx->wait);

        return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
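
/* Illustrative sketch (not part of this file): the async API is used by
 * embedding a struct mlx5_async_work in a caller-owned object; the names
 * below (struct my_obj, my_done) are hypothetical:
 *
 *	struct my_obj {
 *		struct mlx5_async_work work;
 *		u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)];
 *	};
 *
 *	static void my_done(int status, struct mlx5_async_work *context)
 *	{
 *		struct my_obj *obj = container_of(context, struct my_obj, work);
 *		// inspect status and obj->out here
 *	}
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	mlx5_cmd_exec_cb(&ctx, in, sizeof(in), obj->out, sizeof(obj->out),
 *			 my_done, &obj->work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);	// waits for all callbacks
 */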

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
                          void *out, int out_size)
{
        int err;

        err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);

        return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
        struct cmd_msg_cache *ch;
        struct mlx5_cmd_msg *msg;
        struct mlx5_cmd_msg *n;
        int i;

        for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
                ch = &dev->cmd.cache[i];
                list_for_each_entry_safe(msg, n, &ch->head, list) {
                        list_del(&msg->list);
                        mlx5_free_cmd_msg(dev, msg);
                }
        }
}

static unsigned int cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
        512, 32, 16, 8, 2
};

static unsigned int cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
        16 + MLX5_CMD_DATA_BLOCK_SIZE,
        16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
        16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
        16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
        16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};
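
/* Together these tables define five cache pools, from many small messages
 * (512 entries of one data block) down to a couple of very large ones
 * (2 entries of 512 blocks); the extra 16 bytes per entry correspond to the
 * part of the message carried inline in the command entry itself.
 */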

static void create_msg_cache(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;
        struct cmd_msg_cache *ch;
        struct mlx5_cmd_msg *msg;
        int i;
        int k;

        /* Initialize and fill the caches with initial entries */
        for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
                ch = &cmd->cache[k];
                spin_lock_init(&ch->lock);
                INIT_LIST_HEAD(&ch->head);
                ch->num_ent = cmd_cache_num_ent[k];
                ch->max_inbox_size = cmd_cache_ent_size[k];
                for (i = 0; i < ch->num_ent; i++) {
                        msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
                                                 ch->max_inbox_size, 0);
                        if (IS_ERR(msg))
                                break;
                        msg->parent = ch;
                        list_add_tail(&msg->list, &ch->head);
                }
        }
}

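/* The command queue page must be 4K-aligned for the device. Try a plain
 * page-sized coherent allocation first; if it happens to be misaligned,
 * fall back to allocating 2 * page - 1 bytes and aligning the CPU and DMA
 * addresses up within that buffer.
 */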
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
        struct device *ddev = dev->device;

        cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
                                                &cmd->alloc_dma, GFP_KERNEL);
        if (!cmd->cmd_alloc_buf)
                return -ENOMEM;

        /* if the allocation is already 4K-aligned, use it as is */
        if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
                cmd->cmd_buf = cmd->cmd_alloc_buf;
                cmd->dma = cmd->alloc_dma;
                cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
                return 0;
        }

        dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
                          cmd->alloc_dma);
        cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
                                                2 * MLX5_ADAPTER_PAGE_SIZE - 1,
                                                &cmd->alloc_dma, GFP_KERNEL);
        if (!cmd->cmd_alloc_buf)
                return -ENOMEM;

        cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
        cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
        cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
        return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
        struct device *ddev = dev->device;

        dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
                          cmd->alloc_dma);
}

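/* Bring up the command interface: validate the firmware's cmdif revision,
 * allocate the command queue page and mailbox pools, program the queue
 * address into the initialization segment, and start in polling mode
 * (event mode is enabled later, once EQs exist).
 */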
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
        int size = sizeof(struct mlx5_cmd_prot_block);
        int align = roundup_pow_of_two(size);
        struct mlx5_cmd *cmd = &dev->cmd;
        u32 cmd_h, cmd_l;
        u16 cmd_if_rev;
        int err;
        int i;

        memset(cmd, 0, sizeof(*cmd));
        cmd_if_rev = cmdif_rev(dev);
        if (cmd_if_rev != CMD_IF_REV) {
                mlx5_core_err(dev,
                              "Driver cmdif rev(%d) differs from firmware's(%d)\n",
                              CMD_IF_REV, cmd_if_rev);
                return -EINVAL;
        }

        cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
        if (!cmd->pool)
                return -ENOMEM;

        err = alloc_cmd_page(dev, cmd);
        if (err)
                goto err_free_pool;

        cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
        cmd->log_sz = cmd_l >> 4 & 0xf;
        cmd->log_stride = cmd_l & 0xf;
        if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
                mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
                              1 << cmd->log_sz);
                err = -EINVAL;
                goto err_free_page;
        }

        if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
                mlx5_core_err(dev, "command queue size overflow\n");
                err = -EINVAL;
                goto err_free_page;
        }

        cmd->checksum_disabled = 1;
        cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
        cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;

        cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
        if (cmd->cmdif_rev > CMD_IF_REV) {
                mlx5_core_err(dev, "driver does not support this command interface version: driver %d, firmware %d\n",
                              CMD_IF_REV, cmd->cmdif_rev);
                err = -EOPNOTSUPP;
                goto err_free_page;
        }

        spin_lock_init(&cmd->alloc_lock);
        spin_lock_init(&cmd->token_lock);
        for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
                spin_lock_init(&cmd->stats[i].lock);

        sema_init(&cmd->sem, cmd->max_reg_cmds);
        sema_init(&cmd->pages_sem, 1);

        cmd_h = (u32)((u64)(cmd->dma) >> 32);
        cmd_l = (u32)(cmd->dma);
        if (cmd_l & 0xfff) {
                mlx5_core_err(dev, "invalid command queue address\n");
                err = -ENOMEM;
                goto err_free_page;
        }

        iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
        iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

        /* Make sure firmware sees the complete address before we proceed */
        wmb();

        mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

        cmd->mode = CMD_MODE_POLLING;

        create_msg_cache(dev);

        set_wqname(dev);
        cmd->wq = create_singlethread_workqueue(cmd->wq_name);
        if (!cmd->wq) {
                mlx5_core_err(dev, "failed to create command workqueue\n");
                err = -ENOMEM;
                goto err_cache;
        }

        err = create_debugfs_files(dev);
        if (err) {
                err = -ENOMEM;
                goto err_wq;
        }

        return 0;

err_wq:
        destroy_workqueue(cmd->wq);

err_cache:
        destroy_msg_cache(dev);

err_free_page:
        free_cmd_page(dev, cmd);

err_free_pool:
        dma_pool_destroy(cmd->pool);

        return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_cmd *cmd = &dev->cmd;

        clean_debug_files(dev);
        destroy_workqueue(cmd->wq);
        destroy_msg_cache(dev);
        free_cmd_page(dev, cmd);
        dma_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);
