/* linux/drivers/net/ethernet/mellanox/mlx5/core/cmd.c */
   1/*
   2 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/highmem.h>
  34#include <linux/module.h>
  35#include <linux/errno.h>
  36#include <linux/pci.h>
  37#include <linux/dma-mapping.h>
  38#include <linux/slab.h>
  39#include <linux/delay.h>
  40#include <linux/random.h>
  41#include <linux/io-mapping.h>
  42#include <linux/mlx5/driver.h>
  43#include <linux/debugfs.h>
  44
  45#include "mlx5_core.h"
  46
enum {
	/* Command interface revision this driver implements; must match
	 * the revision reported by the device. */
	CMD_IF_REV = 5,
};
  50
/* Command completion modes: poll the ownership bit, or wait for an
 * EQ completion event. */
enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};
  55
/* Command delivery status codes reported by the HCA. The values are
 * hardware-defined; note the jump from 0x9 to 0x10 is intentional. */
enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
  69
  70static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
  71                                           struct mlx5_cmd_msg *in,
  72                                           struct mlx5_cmd_msg *out,
  73                                           void *uout, int uout_size,
  74                                           mlx5_cmd_cbk_t cbk,
  75                                           void *context, int page_queue)
  76{
  77        gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
  78        struct mlx5_cmd_work_ent *ent;
  79
  80        ent = kzalloc(sizeof(*ent), alloc_flags);
  81        if (!ent)
  82                return ERR_PTR(-ENOMEM);
  83
  84        ent->in         = in;
  85        ent->out        = out;
  86        ent->uout       = uout;
  87        ent->uout_size  = uout_size;
  88        ent->callback   = cbk;
  89        ent->context    = context;
  90        ent->cmd        = cmd;
  91        ent->page_queue = page_queue;
  92
  93        return ent;
  94}
  95
  96static u8 alloc_token(struct mlx5_cmd *cmd)
  97{
  98        u8 token;
  99
 100        spin_lock(&cmd->token_lock);
 101        cmd->token++;
 102        if (cmd->token == 0)
 103                cmd->token++;
 104        token = cmd->token;
 105        spin_unlock(&cmd->token_lock);
 106
 107        return token;
 108}
 109
 110static int alloc_ent(struct mlx5_cmd *cmd)
 111{
 112        unsigned long flags;
 113        int ret;
 114
 115        spin_lock_irqsave(&cmd->alloc_lock, flags);
 116        ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
 117        if (ret < cmd->max_reg_cmds)
 118                clear_bit(ret, &cmd->bitmask);
 119        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 120
 121        return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
 122}
 123
/* Return command slot @idx to the free pool (counterpart of alloc_ent). */
static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
 132
/* Return the command layout for slot @idx inside the command buffer;
 * each slot occupies (1 << cmd->log_stride) bytes. */
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}
 137
 138static u8 xor8_buf(void *buf, size_t offset, int len)
 139{
 140        u8 *ptr = buf;
 141        u8 sum = 0;
 142        int i;
 143        int end = len + offset;
 144
 145        for (i = offset; i < end; i++)
 146                sum ^= ptr[i];
 147
 148        return sum;
 149}
 150
 151static int verify_block_sig(struct mlx5_cmd_prot_block *block)
 152{
 153        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
 154        int xor_len = sizeof(*block) - sizeof(block->data) - 1;
 155
 156        if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
 157                return -EINVAL;
 158
 159        if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
 160                return -EINVAL;
 161
 162        return 0;
 163}
 164
/* Compute and store the two signatures of a mailbox protection block.
 * ctrl_sig MUST be written before sig, because the block-wide sig
 * covers the ctrl_sig byte. */
static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	/* control area after the data, excluding ctrl_sig and sig bytes */
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	/* whole block except the final sig byte itself */
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
 173
 174static void calc_chain_sig(struct mlx5_cmd_msg *msg)
 175{
 176        struct mlx5_cmd_mailbox *next = msg->next;
 177        int size = msg->len;
 178        int blen = size - min_t(int, sizeof(msg->first.data), size);
 179        int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
 180                / MLX5_CMD_DATA_BLOCK_SIZE;
 181        int i = 0;
 182
 183        for (i = 0; i < n && next; i++)  {
 184                calc_block_sig(next->buf);
 185                next = next->next;
 186        }
 187}
 188
 189static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 190{
 191        ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
 192        if (csum) {
 193                calc_chain_sig(ent->in);
 194                calc_chain_sig(ent->out);
 195        }
 196}
 197
 198static void poll_timeout(struct mlx5_cmd_work_ent *ent)
 199{
 200        unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
 201        u8 own;
 202
 203        do {
 204                own = ent->lay->status_own;
 205                if (!(own & CMD_OWNER_HW)) {
 206                        ent->ret = 0;
 207                        return;
 208                }
 209                usleep_range(5000, 10000);
 210        } while (time_before(jiffies, poll_end));
 211
 212        ent->ret = -ETIMEDOUT;
 213}
 214
/* Release a command work entry allocated by alloc_cmd(). */
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
 219
 220
 221static int verify_signature(struct mlx5_cmd_work_ent *ent)
 222{
 223        struct mlx5_cmd_mailbox *next = ent->out->next;
 224        int err;
 225        u8 sig;
 226        int size = ent->out->len;
 227        int blen = size - min_t(int, sizeof(ent->out->first.data), size);
 228        int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
 229                / MLX5_CMD_DATA_BLOCK_SIZE;
 230        int i = 0;
 231
 232        sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
 233        if (sig != 0xff)
 234                return -EINVAL;
 235
 236        for (i = 0; i < n && next; i++) {
 237                err = verify_block_sig(next->buf);
 238                if (err)
 239                        return err;
 240
 241                next = next->next;
 242        }
 243
 244        return 0;
 245}
 246
 247static void dump_buf(void *buf, int size, int data_only, int offset)
 248{
 249        __be32 *p = buf;
 250        int i;
 251
 252        for (i = 0; i < size; i += 16) {
 253                pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
 254                         be32_to_cpu(p[1]), be32_to_cpu(p[2]),
 255                         be32_to_cpu(p[3]));
 256                p += 4;
 257                offset += 16;
 258        }
 259        if (!data_only)
 260                pr_debug("\n");
 261}
 262
/*
 * Decide what to report for command @op when the device is in an
 * internal-error state and the command cannot be executed by firmware.
 *
 * Teardown/destroy/dealloc-style commands are faked as successful
 * (the resources are effectively gone once the device resets), while
 * create/query-style commands fail with -EIO and a driver-generated
 * status/syndrome.  @synd and @status are always written.
 */
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	/* destructive / release commands: pretend they succeeded */
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
		return MLX5_CMD_STAT_OK;

	/* constructive / query commands: fail with a driver syndrome */
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_SET_RATE_LIMIT:
	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:

	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
 429
/*
 * Translate a firmware command opcode into a human-readable name for
 * log messages.  Unknown opcodes yield "unknown command opcode".
 */
const char *mlx5_command_str(int command)
{
/* Expands to: case MLX5_CMD_OP_<cmd>: return "<cmd>"; */
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
	default: return "unknown command opcode";
	}
}
 588
/* Translate a firmware command-completion status byte into a
 * human-readable description for log messages. */
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
 628
/* Map a firmware command-completion status byte onto a negative errno
 * (0 for success; unknown statuses map to -EIO). */
static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}
 651
/* mlx5_ifc bit-field description of the common header every command
 * output mailbox starts with: status byte and 32-bit syndrome. */
struct mlx5_ifc_mbox_out_bits {
	u8         status[0x8];
	u8         reserved_at_8[0x18];

	u8         syndrome[0x20];

	u8         reserved_at_40[0x40];
};
 660
/* mlx5_ifc bit-field description of the common header every command
 * input mailbox starts with: 16-bit opcode and 16-bit op_mod. */
struct mlx5_ifc_mbox_in_bits {
	u8         opcode[0x10];
	u8         reserved_at_10[0x10];

	u8         reserved_at_20[0x10];
	u8         op_mod[0x10];

	u8         reserved_at_40[0x40];
};
 670
/* Extract the status byte and syndrome from a command output mailbox. */
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}
 676
/*
 * Check the outcome of a completed command.  Returns 0 when the
 * firmware reported success; otherwise logs the opcode, op_mod,
 * status and syndrome and returns the corresponding negative errno.
 */
static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8  status;
	u16 opcode;
	u16 op_mod;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	/* opcode/op_mod are only needed for the failure log */
	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);

	mlx5_core_err(dev,
		      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
		      mlx5_command_str(opcode),
		      opcode, op_mod,
		      cmd_status_str(status),
		      status,
		      syndrome);

	return cmd_status_to_err(status);
}
 701
/*
 * Debug-dump a command's input (@input != 0) or output message: first
 * the inline part held in the command layout, then each chained
 * mailbox block, tracking a running byte offset for the printout.
 * In data_only mode only the payload bytes are dumped; otherwise the
 * whole layout/protection-block structures are dumped.
 */
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	/* inline part: lives in the command layout itself */
	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	/* chained mailbox blocks carry the rest of the message */
	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			/* advance by the full block size even when the last
			 * block is partially used, so the loop terminates */
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}
 753
/* Read the firmware opcode from the inline header of an input message. */
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}
 758
/*
 * Delayed-work handler fired when an asynchronous (callback) command
 * does not complete within the timeout.  Marks the entry -ETIMEDOUT
 * and forces completion processing for its slot; the slot itself is
 * deliberately not reclaimed ("leak of a command resource") since
 * firmware may still write to it.
 */
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	/* simulate a completion for this slot so waiters are released */
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
 775
 776static void cmd_work_handler(struct work_struct *work)
 777{
 778        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
 779        struct mlx5_cmd *cmd = ent->cmd;
 780        struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
 781        unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
 782        struct mlx5_cmd_layout *lay;
 783        struct semaphore *sem;
 784        unsigned long flags;
 785
 786        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 787        down(sem);
 788        if (!ent->page_queue) {
 789                ent->idx = alloc_ent(cmd);
 790                if (ent->idx < 0) {
 791                        mlx5_core_err(dev, "failed to allocate command entry\n");
 792                        up(sem);
 793                        return;
 794                }
 795        } else {
 796                ent->idx = cmd->max_reg_cmds;
 797                spin_lock_irqsave(&cmd->alloc_lock, flags);
 798                clear_bit(ent->idx, &cmd->bitmask);
 799                spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 800        }
 801
 802        cmd->ent_arr[ent->idx] = ent;
 803        lay = get_inst(cmd, ent->idx);
 804        ent->lay = lay;
 805        memset(lay, 0, sizeof(*lay));
 806        memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
 807        ent->op = be32_to_cpu(lay->in[0]) >> 16;
 808        if (ent->in->next)
 809                lay->in_ptr = cpu_to_be64(ent->in->next->dma);
 810        lay->inlen = cpu_to_be32(ent->in->len);
 811        if (ent->out->next)
 812                lay->out_ptr = cpu_to_be64(ent->out->next->dma);
 813        lay->outlen = cpu_to_be32(ent->out->len);
 814        lay->type = MLX5_PCI_CMD_XPORT;
 815        lay->token = ent->token;
 816        lay->status_own = CMD_OWNER_HW;
 817        set_signature(ent, !cmd->checksum_disabled);
 818        dump_command(dev, ent, 1);
 819        ent->ts1 = ktime_get_ns();
 820
 821        if (ent->callback)
 822                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
 823
 824        /* ring doorbell after the descriptor is valid */
 825        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
 826        wmb();
 827        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
 828        mmiowb();
 829        /* if not in polling don't use ent after this point */
 830        if (cmd->mode == CMD_MODE_POLLING) {
 831                poll_timeout(ent);
 832                /* make sure we read the descriptor after ownership is SW */
 833                rmb();
 834                mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
 835        }
 836}
 837
 838static const char *deliv_status_to_str(u8 status)
 839{
 840        switch (status) {
 841        case MLX5_CMD_DELIVERY_STAT_OK:
 842                return "no errors";
 843        case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
 844                return "signature error";
 845        case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
 846                return "token error";
 847        case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
 848                return "bad block number";
 849        case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
 850                return "output pointer not aligned to block size";
 851        case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
 852                return "input pointer not aligned to block size";
 853        case MLX5_CMD_DELIVERY_STAT_FW_ERR:
 854                return "firmware internal error";
 855        case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
 856                return "command input length error";
 857        case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
 858                return "command ouput length error";
 859        case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
 860                return "reserved fields not cleared";
 861        case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
 862                return "bad command descriptor type";
 863        default:
 864                return "unknown status code";
 865        }
 866}
 867
/* Block until command entry @ent completes and return its result.
 *
 * In polling mode cmd_work_handler() polls and completes the entry
 * itself, so the uninterruptible wait cannot hang.  In events mode the
 * wait is bounded; on timeout the completion path is forced so the
 * entry is cleaned up before we report -ETIMEDOUT.
 *
 * Return: 0 on success or a negative errno (ent->ret).
 */
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		/* No completion EQE in time: force-complete the slot. */
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}
 893
/*  Notes:
 *    1. Callback functions may not sleep
 *    2. Page queue commands do not support asynchronous completion
 *
 * Allocate a command entry for (in, out), queue it to the command
 * workqueue (or run it synchronously for page-queue commands) and, for
 * synchronous calls, wait for completion and record latency stats.
 *
 * Return: 0 on success with *status set to the delivery status,
 *         negative errno on failure.
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   u8 token)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	/* See note 2 above: page-queue commands must be synchronous. */
	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->token = token;

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		/* Page-queue commands run inline to avoid deadlocking with
		 * page requests serviced from the same workqueue.
		 */
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	/* Async: completion path frees the entry and invokes callback. */
	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out_free;

	/* Accumulate per-opcode firmware execution time statistics. */
	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	free_cmd(ent);
out:
	return err;
}
 960
 961static ssize_t dbg_write(struct file *filp, const char __user *buf,
 962                         size_t count, loff_t *pos)
 963{
 964        struct mlx5_core_dev *dev = filp->private_data;
 965        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
 966        char lbuf[3];
 967        int err;
 968
 969        if (!dbg->in_msg || !dbg->out_msg)
 970                return -ENOMEM;
 971
 972        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
 973                return -EFAULT;
 974
 975        lbuf[sizeof(lbuf) - 1] = 0;
 976
 977        if (strcmp(lbuf, "go"))
 978                return -EINVAL;
 979
 980        err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);
 981
 982        return err ? err : count;
 983}
 984
 985
/* File operations for the debugfs "run" trigger file. */
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
 991
 992static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
 993                            u8 token)
 994{
 995        struct mlx5_cmd_prot_block *block;
 996        struct mlx5_cmd_mailbox *next;
 997        int copy;
 998
 999        if (!to || !from)
1000                return -ENOMEM;
1001
1002        copy = min_t(int, size, sizeof(to->first.data));
1003        memcpy(to->first.data, from, copy);
1004        size -= copy;
1005        from += copy;
1006
1007        next = to->next;
1008        while (size) {
1009                if (!next) {
1010                        /* this is a BUG */
1011                        return -ENOMEM;
1012                }
1013
1014                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
1015                block = next->buf;
1016                memcpy(block->data, from, copy);
1017                from += copy;
1018                size -= copy;
1019                block->token = token;
1020                next = next->next;
1021        }
1022
1023        return 0;
1024}
1025
1026static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
1027{
1028        struct mlx5_cmd_prot_block *block;
1029        struct mlx5_cmd_mailbox *next;
1030        int copy;
1031
1032        if (!to || !from)
1033                return -ENOMEM;
1034
1035        copy = min_t(int, size, sizeof(from->first.data));
1036        memcpy(to, from->first.data, copy);
1037        size -= copy;
1038        to += copy;
1039
1040        next = from->next;
1041        while (size) {
1042                if (!next) {
1043                        /* this is a BUG */
1044                        return -ENOMEM;
1045                }
1046
1047                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
1048                block = next->buf;
1049
1050                memcpy(to, block->data, copy);
1051                to += copy;
1052                size -= copy;
1053                next = next->next;
1054        }
1055
1056        return 0;
1057}
1058
1059static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
1060                                              gfp_t flags)
1061{
1062        struct mlx5_cmd_mailbox *mailbox;
1063
1064        mailbox = kmalloc(sizeof(*mailbox), flags);
1065        if (!mailbox)
1066                return ERR_PTR(-ENOMEM);
1067
1068        mailbox->buf = pci_pool_zalloc(dev->cmd.pool, flags,
1069                                       &mailbox->dma);
1070        if (!mailbox->buf) {
1071                mlx5_core_dbg(dev, "failed allocation\n");
1072                kfree(mailbox);
1073                return ERR_PTR(-ENOMEM);
1074        }
1075        mailbox->next = NULL;
1076
1077        return mailbox;
1078}
1079
/* Release a mailbox allocated by alloc_cmd_box(): DMA buffer back to
 * the pci pool, then the tracking struct.
 */
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
1086
/* Allocate a command message able to hold @size bytes: 16 inline bytes
 * plus as many chained mailbox blocks as needed for the remainder.
 *
 * The mailbox list is built back-to-front so each new block can point
 * (via block->next DMA address) at the previously built one; block
 * numbers therefore count up from 0 at the head of the final chain.
 *
 * Return: the message, or ERR_PTR on allocation failure (all partially
 * built blocks are freed).
 */
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	/* Bytes that do not fit inline, and the block count for them. */
	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		/* Prepend: the new block links to the current head. */
		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	/* Unwind every block built so far. */
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
1135
1136static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
1137                                  struct mlx5_cmd_msg *msg)
1138{
1139        struct mlx5_cmd_mailbox *head = msg->next;
1140        struct mlx5_cmd_mailbox *next;
1141
1142        while (head) {
1143                next = head->next;
1144                free_cmd_box(dev, head);
1145                head = next;
1146        }
1147        kfree(msg);
1148}
1149
1150static ssize_t data_write(struct file *filp, const char __user *buf,
1151                          size_t count, loff_t *pos)
1152{
1153        struct mlx5_core_dev *dev = filp->private_data;
1154        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1155        void *ptr;
1156
1157        if (*pos != 0)
1158                return -EINVAL;
1159
1160        kfree(dbg->in_msg);
1161        dbg->in_msg = NULL;
1162        dbg->inlen = 0;
1163        ptr = memdup_user(buf, count);
1164        if (IS_ERR(ptr))
1165                return PTR_ERR(ptr);
1166        dbg->in_msg = ptr;
1167        dbg->inlen = count;
1168
1169        *pos = count;
1170
1171        return count;
1172}
1173
1174static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
1175                         loff_t *pos)
1176{
1177        struct mlx5_core_dev *dev = filp->private_data;
1178        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1179        int copy;
1180
1181        if (*pos)
1182                return 0;
1183
1184        if (!dbg->out_msg)
1185                return -ENOMEM;
1186
1187        copy = min_t(int, count, dbg->outlen);
1188        if (copy_to_user(buf, dbg->out_msg, copy))
1189                return -EFAULT;
1190
1191        *pos += copy;
1192
1193        return copy;
1194}
1195
/* File operations for the debugfs "in"/"out" data files. */
static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};
1202
1203static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
1204                           loff_t *pos)
1205{
1206        struct mlx5_core_dev *dev = filp->private_data;
1207        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1208        char outlen[8];
1209        int err;
1210
1211        if (*pos)
1212                return 0;
1213
1214        err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
1215        if (err < 0)
1216                return err;
1217
1218        if (copy_to_user(buf, &outlen, err))
1219                return -EFAULT;
1220
1221        *pos += err;
1222
1223        return err;
1224}
1225
1226static ssize_t outlen_write(struct file *filp, const char __user *buf,
1227                            size_t count, loff_t *pos)
1228{
1229        struct mlx5_core_dev *dev = filp->private_data;
1230        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1231        char outlen_str[8];
1232        int outlen;
1233        void *ptr;
1234        int err;
1235
1236        if (*pos != 0 || count > 6)
1237                return -EINVAL;
1238
1239        kfree(dbg->out_msg);
1240        dbg->out_msg = NULL;
1241        dbg->outlen = 0;
1242
1243        if (copy_from_user(outlen_str, buf, count))
1244                return -EFAULT;
1245
1246        outlen_str[7] = 0;
1247
1248        err = sscanf(outlen_str, "%d", &outlen);
1249        if (err < 0)
1250                return err;
1251
1252        ptr = kzalloc(outlen, GFP_KERNEL);
1253        if (!ptr)
1254                return -ENOMEM;
1255
1256        dbg->out_msg = ptr;
1257        dbg->outlen = outlen;
1258
1259        *pos = count;
1260
1261        return count;
1262}
1263
/* File operations for the debugfs "out_len" control file. */
static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};
1270
/* Build the per-device command workqueue name from the PCI device
 * name, e.g. "mlx5_cmd_0000:03:00.0".
 */
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}
1278
/* Tear down the command-interface debugfs tree (no-op when debugfs
 * support is unavailable).
 */
static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}
1289
/* Create the command-interface debugfs tree:
 *   cmd/in       staged command input        (write)
 *   cmd/out      last command output         (read)
 *   cmd/out_len  output buffer length        (read/write)
 *   cmd/status   last delivery status        (read/write u8)
 *   cmd/run      execution trigger           (write "go")
 *
 * Returns 0 (also when debugfs is unavailable) or -ENOMEM; on partial
 * failure everything created so far is cleaned up.
 */
static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}
1334
/* Switch the command interface between polling and events mode.
 *
 * Taking every regular-command semaphore slot plus the pages slot
 * guarantees no command is in flight while cmd->mode changes; the
 * slots are then released so traffic resumes in the new mode.
 */
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
1350
/* Switch command completions to EQE-driven (events) mode. */
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}
1355
/* Switch command completions to polling mode (used before EQs exist
 * and during teardown/error flows).
 */
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
1360
1361static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1362{
1363        unsigned long flags;
1364
1365        if (msg->parent) {
1366                spin_lock_irqsave(&msg->parent->lock, flags);
1367                list_add_tail(&msg->list, &msg->parent->head);
1368                spin_unlock_irqrestore(&msg->parent->lock, flags);
1369        } else {
1370                mlx5_free_cmd_msg(dev, msg);
1371        }
1372}
1373
/* Completion handler for the command interface.
 *
 * @vec: bitmask of completed command-queue slots (low 32 bits); the
 *       MLX5_TRIGGERED_CMD_COMP bit marks driver-forced completions
 *       (timeout / internal error), whose entries get ABORTED status.
 *
 * Called from the EQ handler in events mode, from cmd_work_handler()
 * in polling mode, and from timeout paths.  For each completed slot it
 * copies out the inline result, frees the slot, and either invokes the
 * async callback (freeing the entry and its messages) or wakes the
 * synchronous waiter.
 */
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			/* Stop the pending timeout work for async entries. */
			if (ent->callback)
				cancel_delayed_work(&ent->cb_timeout_work);
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			/* ent->ret already set (e.g. -ETIMEDOUT) means a
			 * forced completion; keep that result.
			 */
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);

			if (ent->callback) {
				/* Async path: record stats, copy the result
				 * to the user buffer, free everything, then
				 * invoke the callback last (it may not
				 * sleep and must not see freed state).
				 */
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err) {
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

					err = err ? err : mlx5_cmd_check(dev,
									ent->in->first.data,
									ent->uout);
				}

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				free_cmd(ent);
				callback(err, context);
			} else {
				/* Sync path: wake the waiter in wait_func(). */
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
1455
/* Map a nonzero delivery status to a generic error result.
 * NOTE(review): -1 is not a real errno; callers only test for nonzero.
 */
static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}
1460
1461static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
1462                                      gfp_t gfp)
1463{
1464        struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
1465        struct cmd_msg_cache *ch = NULL;
1466        struct mlx5_cmd *cmd = &dev->cmd;
1467        int i;
1468
1469        if (in_size <= 16)
1470                goto cache_miss;
1471
1472        for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
1473                ch = &cmd->cache[i];
1474                if (in_size > ch->max_inbox_size)
1475                        continue;
1476                spin_lock_irq(&ch->lock);
1477                if (list_empty(&ch->head)) {
1478                        spin_unlock_irq(&ch->lock);
1479                        continue;
1480                }
1481                msg = list_entry(ch->head.next, typeof(*msg), list);
1482                /* For cached lists, we must explicitly state what is
1483                 * the real size
1484                 */
1485                msg->len = in_size;
1486                list_del(&msg->list);
1487                spin_unlock_irq(&ch->lock);
1488                break;
1489        }
1490
1491        if (!IS_ERR(msg))
1492                return msg;
1493
1494cache_miss:
1495        msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
1496        return msg;
1497}
1498
1499static int is_manage_pages(void *in)
1500{
1501        return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
1502}
1503
/* Common entry point for synchronous and asynchronous command
 * execution: wraps (in, out) into command messages and invokes the
 * command interface.
 *
 * When the device is in internal-error state (or the PCI channel is
 * offline), fabricate a software completion instead of touching HW.
 * For async calls (@callback set), the input/output messages are freed
 * by the completion handler, not here.
 *
 * Return: 0 on success or a negative errno.
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u8 token;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);

		/* Synthesize a per-opcode software status/syndrome. */
		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);
	/* Callbacks may run from atomic context. */
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	/* Async completions free the messages themselves. */
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}
1572
1573int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1574                  int out_size)
1575{
1576        int err;
1577
1578        err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
1579        return err ? : mlx5_cmd_check(dev, in, out);
1580}
1581EXPORT_SYMBOL(mlx5_cmd_exec);
1582
/* Execute a command asynchronously; @callback runs from the completion
 * path (it may not sleep) with @context as its argument.
 */
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
1590
1591static void destroy_msg_cache(struct mlx5_core_dev *dev)
1592{
1593        struct cmd_msg_cache *ch;
1594        struct mlx5_cmd_msg *msg;
1595        struct mlx5_cmd_msg *n;
1596        int i;
1597
1598        for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
1599                ch = &dev->cmd.cache[i];
1600                list_for_each_entry_safe(msg, n, &ch->head, list) {
1601                        list_del(&msg->list);
1602                        mlx5_free_cmd_msg(dev, msg);
1603                }
1604        }
1605}
1606
/* Number of pre-allocated messages per cache, smallest cache first. */
static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};
1610
/* Maximum inbox size served by each cache: 16 inline bytes plus a
 * number of MLX5_CMD_DATA_BLOCK_SIZE mailbox blocks.
 */
static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};
1618
/* Pre-populate the command message caches.
 *
 * Allocation failures are tolerated: a cache is simply left partially
 * (or completely) empty and alloc_msg() falls back to on-demand
 * allocation, hence no return value.
 */
static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}
1644
/* Allocate the 4K-aligned DMA page holding the HW command queue.
 *
 * dma_zalloc_coherent() gives no alignment guarantee beyond the
 * architecture minimum, so if the first allocation is not 4K-aligned
 * it is replaced by a (2*4K - 1)-byte allocation from which an aligned
 * window is carved with PTR_ALIGN/ALIGN.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* Already aligned to 4K?  Use the buffer as-is. */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	/* Otherwise over-allocate and align within the larger buffer. */
	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}
1675
1676static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1677{
1678        struct device *ddev = &dev->pdev->dev;
1679
1680        dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
1681                          cmd->alloc_dma);
1682}
1683
1684int mlx5_cmd_init(struct mlx5_core_dev *dev)
1685{
1686        int size = sizeof(struct mlx5_cmd_prot_block);
1687        int align = roundup_pow_of_two(size);
1688        struct mlx5_cmd *cmd = &dev->cmd;
1689        u32 cmd_h, cmd_l;
1690        u16 cmd_if_rev;
1691        int err;
1692        int i;
1693
1694        memset(cmd, 0, sizeof(*cmd));
1695        cmd_if_rev = cmdif_rev(dev);
1696        if (cmd_if_rev != CMD_IF_REV) {
1697                dev_err(&dev->pdev->dev,
1698                        "Driver cmdif rev(%d) differs from firmware's(%d)\n",
1699                        CMD_IF_REV, cmd_if_rev);
1700                return -EINVAL;
1701        }
1702
1703        cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
1704        if (!cmd->pool)
1705                return -ENOMEM;
1706
1707        err = alloc_cmd_page(dev, cmd);
1708        if (err)
1709                goto err_free_pool;
1710
1711        cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
1712        cmd->log_sz = cmd_l >> 4 & 0xf;
1713        cmd->log_stride = cmd_l & 0xf;
1714        if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
1715                dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
1716                        1 << cmd->log_sz);
1717                err = -EINVAL;
1718                goto err_free_page;
1719        }
1720
1721        if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
1722                dev_err(&dev->pdev->dev, "command queue size overflow\n");
1723                err = -EINVAL;
1724                goto err_free_page;
1725        }
1726
1727        cmd->checksum_disabled = 1;
1728        cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
1729        cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
1730
1731        cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
1732        if (cmd->cmdif_rev > CMD_IF_REV) {
1733                dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
1734                        CMD_IF_REV, cmd->cmdif_rev);
1735                err = -EOPNOTSUPP;
1736                goto err_free_page;
1737        }
1738
1739        spin_lock_init(&cmd->alloc_lock);
1740        spin_lock_init(&cmd->token_lock);
1741        for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
1742                spin_lock_init(&cmd->stats[i].lock);
1743
1744        sema_init(&cmd->sem, cmd->max_reg_cmds);
1745        sema_init(&cmd->pages_sem, 1);
1746
1747        cmd_h = (u32)((u64)(cmd->dma) >> 32);
1748        cmd_l = (u32)(cmd->dma);
1749        if (cmd_l & 0xfff) {
1750                dev_err(&dev->pdev->dev, "invalid command queue address\n");
1751                err = -ENOMEM;
1752                goto err_free_page;
1753        }
1754
1755        iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
1756        iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
1757
1758        /* Make sure firmware sees the complete address before we proceed */
1759        wmb();
1760
1761        mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
1762
1763        cmd->mode = CMD_MODE_POLLING;
1764
1765        create_msg_cache(dev);
1766
1767        set_wqname(dev);
1768        cmd->wq = create_singlethread_workqueue(cmd->wq_name);
1769        if (!cmd->wq) {
1770                dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
1771                err = -ENOMEM;
1772                goto err_cache;
1773        }
1774
1775        err = create_debugfs_files(dev);
1776        if (err) {
1777                err = -ENOMEM;
1778                goto err_wq;
1779        }
1780
1781        return 0;
1782
1783err_wq:
1784        destroy_workqueue(cmd->wq);
1785
1786err_cache:
1787        destroy_msg_cache(dev);
1788
1789err_free_page:
1790        free_cmd_page(dev, cmd);
1791
1792err_free_pool:
1793        pci_pool_destroy(cmd->pool);
1794
1795        return err;
1796}
1797EXPORT_SYMBOL(mlx5_cmd_init);
1798
1799void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
1800{
1801        struct mlx5_cmd *cmd = &dev->cmd;
1802
1803        clean_debug_files(dev);
1804        destroy_workqueue(cmd->wq);
1805        destroy_msg_cache(dev);
1806        free_cmd_page(dev, cmd);
1807        pci_pool_destroy(cmd->pool);
1808}
1809EXPORT_SYMBOL(mlx5_cmd_cleanup);
1810