/* linux/drivers/net/ethernet/mellanox/mlx5/core/cmd.c */
   1/*
   2 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/highmem.h>
  34#include <linux/module.h>
  35#include <linux/errno.h>
  36#include <linux/pci.h>
  37#include <linux/dma-mapping.h>
  38#include <linux/slab.h>
  39#include <linux/delay.h>
  40#include <linux/random.h>
  41#include <linux/io-mapping.h>
  42#include <linux/mlx5/driver.h>
  43#include <linux/debugfs.h>
  44
  45#include "mlx5_core.h"
  46
/* Command interface revision this driver implements; checked against the
 * revision the device reports.
 */
enum {
	CMD_IF_REV = 5,
};

/* Completion mode for commands: poll the ownership bit in the command
 * layout, or wait for a completion event.
 */
enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

/* Pre-allocated mailbox-cache pools and their message sizes.
 * LONG_LIST_SIZE appears sized for 8 bytes per PAGE_SIZE page of 2GB,
 * plus a 16-byte header and one data block -- presumably for large
 * page-list commands; confirm against the mailbox-cache users.
 */
enum {
	NUM_LONG_LISTS    = 2,
	NUM_MED_LISTS     = 64,
	LONG_LIST_SIZE    = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE     = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

/* Delivery status codes reported by the device for a command descriptor.
 * Note the jump from 0x9 to 0x10 follows the device interface values.
 */
enum {
	MLX5_CMD_DELIVERY_STAT_OK                       = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR               = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR                  = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR          = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR        = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR         = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR                   = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR            = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR           = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR      = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR            = 0x10,
};
  77
  78static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
  79                                           struct mlx5_cmd_msg *in,
  80                                           struct mlx5_cmd_msg *out,
  81                                           void *uout, int uout_size,
  82                                           mlx5_cmd_cbk_t cbk,
  83                                           void *context, int page_queue)
  84{
  85        gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
  86        struct mlx5_cmd_work_ent *ent;
  87
  88        ent = kzalloc(sizeof(*ent), alloc_flags);
  89        if (!ent)
  90                return ERR_PTR(-ENOMEM);
  91
  92        ent->in         = in;
  93        ent->out        = out;
  94        ent->uout       = uout;
  95        ent->uout_size  = uout_size;
  96        ent->callback   = cbk;
  97        ent->context    = context;
  98        ent->cmd        = cmd;
  99        ent->page_queue = page_queue;
 100
 101        return ent;
 102}
 103
 104static u8 alloc_token(struct mlx5_cmd *cmd)
 105{
 106        u8 token;
 107
 108        spin_lock(&cmd->token_lock);
 109        cmd->token++;
 110        if (cmd->token == 0)
 111                cmd->token++;
 112        token = cmd->token;
 113        spin_unlock(&cmd->token_lock);
 114
 115        return token;
 116}
 117
 118static int alloc_ent(struct mlx5_cmd *cmd)
 119{
 120        unsigned long flags;
 121        int ret;
 122
 123        spin_lock_irqsave(&cmd->alloc_lock, flags);
 124        ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
 125        if (ret < cmd->max_reg_cmds)
 126                clear_bit(ret, &cmd->bitmask);
 127        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 128
 129        return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
 130}
 131
 132static void free_ent(struct mlx5_cmd *cmd, int idx)
 133{
 134        unsigned long flags;
 135
 136        spin_lock_irqsave(&cmd->alloc_lock, flags);
 137        set_bit(idx, &cmd->bitmask);
 138        spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 139}
 140
 141static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
 142{
 143        return cmd->cmd_buf + (idx << cmd->log_stride);
 144}
 145
 146static u8 xor8_buf(void *buf, size_t offset, int len)
 147{
 148        u8 *ptr = buf;
 149        u8 sum = 0;
 150        int i;
 151        int end = len + offset;
 152
 153        for (i = offset; i < end; i++)
 154                sum ^= ptr[i];
 155
 156        return sum;
 157}
 158
 159static int verify_block_sig(struct mlx5_cmd_prot_block *block)
 160{
 161        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
 162        int xor_len = sizeof(*block) - sizeof(block->data) - 1;
 163
 164        if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
 165                return -EINVAL;
 166
 167        if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
 168                return -EINVAL;
 169
 170        return 0;
 171}
 172
 173static void calc_block_sig(struct mlx5_cmd_prot_block *block)
 174{
 175        int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
 176        size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
 177
 178        block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
 179        block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
 180}
 181
 182static void calc_chain_sig(struct mlx5_cmd_msg *msg)
 183{
 184        struct mlx5_cmd_mailbox *next = msg->next;
 185        int size = msg->len;
 186        int blen = size - min_t(int, sizeof(msg->first.data), size);
 187        int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
 188                / MLX5_CMD_DATA_BLOCK_SIZE;
 189        int i = 0;
 190
 191        for (i = 0; i < n && next; i++)  {
 192                calc_block_sig(next->buf);
 193                next = next->next;
 194        }
 195}
 196
 197static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 198{
 199        ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
 200        if (csum) {
 201                calc_chain_sig(ent->in);
 202                calc_chain_sig(ent->out);
 203        }
 204}
 205
 206static void poll_timeout(struct mlx5_cmd_work_ent *ent)
 207{
 208        unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
 209        u8 own;
 210
 211        do {
 212                own = ent->lay->status_own;
 213                if (!(own & CMD_OWNER_HW)) {
 214                        ent->ret = 0;
 215                        return;
 216                }
 217                usleep_range(5000, 10000);
 218        } while (time_before(jiffies, poll_end));
 219
 220        ent->ret = -ETIMEDOUT;
 221}
 222
/* Release a command work entry allocated by alloc_cmd(). */
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
 227
 228
 229static int verify_signature(struct mlx5_cmd_work_ent *ent)
 230{
 231        struct mlx5_cmd_mailbox *next = ent->out->next;
 232        int err;
 233        u8 sig;
 234        int size = ent->out->len;
 235        int blen = size - min_t(int, sizeof(ent->out->first.data), size);
 236        int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
 237                / MLX5_CMD_DATA_BLOCK_SIZE;
 238        int i = 0;
 239
 240        sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
 241        if (sig != 0xff)
 242                return -EINVAL;
 243
 244        for (i = 0; i < n && next; i++) {
 245                err = verify_block_sig(next->buf);
 246                if (err)
 247                        return err;
 248
 249                next = next->next;
 250        }
 251
 252        return 0;
 253}
 254
/* Hex-dump @size bytes of @buf as rows of four big-endian 32-bit words,
 * each row prefixed with the running @offset. When @data_only is clear,
 * a trailing blank debug line separates sections.
 *
 * NOTE(review): every iteration reads a full 16-byte row; if @size is
 * not a multiple of 16 this reads up to 15 bytes past @size. Callers
 * here pass layout/block-sized buffers, but confirm before reusing.
 */
static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
 270
/* Decide the return value for command @op when the device is in an
 * internal-error state and the command cannot actually be executed.
 *
 * Teardown/destroy/deallocate-style commands are faked as successful
 * (MLX5_CMD_STAT_OK) so cleanup paths can proceed; create/query/modify
 * commands fail with -EIO and a driver-generated status/syndrome.
 * @synd and @status are always written (zeroed for the success group).
 */
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	/* destructive/cleanup commands: pretend they succeeded */
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
		return MLX5_CMD_STAT_OK;

	/* create/query/modify commands: fail with a driver syndrome */
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:

	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:

	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
 430
/* Map a command opcode to its human-readable name for logging.
 * Returns a static string; "unknown command opcode" for unmapped values.
 */
const char *mlx5_command_str(int command)
{
/* expands to: case MLX5_CMD_OP_<cmd>: return "<cmd>"; */
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
	default: return "unknown command opcode";
	}
}
 581
/* Map a firmware command status byte to a human-readable description
 * for error logging. Returns a static string.
 */
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
 621
/* Translate a firmware command status byte into a negative errno
 * (0 for success, -EIO for unknown statuses).
 */
static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}
 644
/* Generic command outbox header, in mlx5_ifc bit-field convention
 * (array length = field width in bits): every command output starts
 * with an 8-bit status and a 32-bit syndrome.
 */
struct mlx5_ifc_mbox_out_bits {
	u8         status[0x8];
	u8         reserved_at_8[0x18];

	u8         syndrome[0x20];

	u8         reserved_at_40[0x40];
};
 653
/* Generic command inbox header, in mlx5_ifc bit-field convention:
 * every command input starts with a 16-bit opcode and a 16-bit op_mod.
 */
struct mlx5_ifc_mbox_in_bits {
	u8         opcode[0x10];
	u8         reserved_at_10[0x10];

	u8         reserved_at_20[0x10];
	u8         op_mod[0x10];

	u8         reserved_at_40[0x40];
};
 663
/* Extract the status byte and syndrome word from a command outbox. */
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}
 669
 670static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
 671{
 672        u32 syndrome;
 673        u8  status;
 674        u16 opcode;
 675        u16 op_mod;
 676
 677        mlx5_cmd_mbox_status(out, &status, &syndrome);
 678        if (!status)
 679                return 0;
 680
 681        opcode = MLX5_GET(mbox_in, in, opcode);
 682        op_mod = MLX5_GET(mbox_in, in, op_mod);
 683
 684        mlx5_core_err(dev,
 685                      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
 686                      mlx5_command_str(opcode),
 687                      opcode, op_mod,
 688                      cmd_status_str(status),
 689                      status,
 690                      syndrome);
 691
 692        return cmd_status_to_err(status);
 693}
 694
/* Debug-dump a command's input or output message.
 *
 * With the MLX5_CMD_DATA debug mask set, only the payload bytes are
 * dumped (inline layout bytes, then each chained data block up to the
 * message length); otherwise the whole layout descriptor and full
 * protection blocks are dumped at mlx5_core_dbg level.
 *
 * @input: non-zero to dump ent->in, zero to dump ent->out.
 */
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		/* inline portion of the message held in the layout itself */
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	/* walk the mailbox chain; offset tracks message bytes in data-only
	 * mode and raw block bytes otherwise
	 */
	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}
 746
/* Read the 16-bit opcode from the inline header of a command message. */
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}
 751
 752static void cb_timeout_handler(struct work_struct *work)
 753{
 754        struct delayed_work *dwork = container_of(work, struct delayed_work,
 755                                                  work);
 756        struct mlx5_cmd_work_ent *ent = container_of(dwork,
 757                                                     struct mlx5_cmd_work_ent,
 758                                                     cb_timeout_work);
 759        struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
 760                                                 cmd);
 761
 762        ent->ret = -ETIMEDOUT;
 763        mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
 764                       mlx5_command_str(msg_to_opcode(ent->in)),
 765                       msg_to_opcode(ent->in));
 766        mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
 767}
 768
 769static void cmd_work_handler(struct work_struct *work)
 770{
 771        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
 772        struct mlx5_cmd *cmd = ent->cmd;
 773        struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
 774        unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
 775        struct mlx5_cmd_layout *lay;
 776        struct semaphore *sem;
 777        unsigned long flags;
 778
 779        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 780        down(sem);
 781        if (!ent->page_queue) {
 782                ent->idx = alloc_ent(cmd);
 783                if (ent->idx < 0) {
 784                        mlx5_core_err(dev, "failed to allocate command entry\n");
 785                        up(sem);
 786                        return;
 787                }
 788        } else {
 789                ent->idx = cmd->max_reg_cmds;
 790                spin_lock_irqsave(&cmd->alloc_lock, flags);
 791                clear_bit(ent->idx, &cmd->bitmask);
 792                spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 793        }
 794
 795        cmd->ent_arr[ent->idx] = ent;
 796        lay = get_inst(cmd, ent->idx);
 797        ent->lay = lay;
 798        memset(lay, 0, sizeof(*lay));
 799        memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
 800        ent->op = be32_to_cpu(lay->in[0]) >> 16;
 801        if (ent->in->next)
 802                lay->in_ptr = cpu_to_be64(ent->in->next->dma);
 803        lay->inlen = cpu_to_be32(ent->in->len);
 804        if (ent->out->next)
 805                lay->out_ptr = cpu_to_be64(ent->out->next->dma);
 806        lay->outlen = cpu_to_be32(ent->out->len);
 807        lay->type = MLX5_PCI_CMD_XPORT;
 808        lay->token = ent->token;
 809        lay->status_own = CMD_OWNER_HW;
 810        set_signature(ent, !cmd->checksum_disabled);
 811        dump_command(dev, ent, 1);
 812        ent->ts1 = ktime_get_ns();
 813
 814        if (ent->callback)
 815                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
 816
 817        /* ring doorbell after the descriptor is valid */
 818        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
 819        wmb();
 820        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
 821        mmiowb();
 822        /* if not in polling don't use ent after this point */
 823        if (cmd->mode == CMD_MODE_POLLING) {
 824                poll_timeout(ent);
 825                /* make sure we read the descriptor after ownership is SW */
 826                rmb();
 827                mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
 828        }
 829}
 830
 831static const char *deliv_status_to_str(u8 status)
 832{
 833        switch (status) {
 834        case MLX5_CMD_DELIVERY_STAT_OK:
 835                return "no errors";
 836        case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
 837                return "signature error";
 838        case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
 839                return "token error";
 840        case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
 841                return "bad block number";
 842        case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
 843                return "output pointer not aligned to block size";
 844        case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
 845                return "input pointer not aligned to block size";
 846        case MLX5_CMD_DELIVERY_STAT_FW_ERR:
 847                return "firmware internal error";
 848        case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
 849                return "command input length error";
 850        case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
 851                return "command ouput length error";
 852        case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
 853                return "reserved fields not cleared";
 854        case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
 855                return "bad command descriptor type";
 856        default:
 857                return "unknown status code";
 858        }
 859}
 860
 861static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 862{
 863        unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
 864        struct mlx5_cmd *cmd = &dev->cmd;
 865        int err;
 866
 867        if (cmd->mode == CMD_MODE_POLLING) {
 868                wait_for_completion(&ent->done);
 869        } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
 870                ent->ret = -ETIMEDOUT;
 871                mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
 872        }
 873
 874        err = ent->ret;
 875
 876        if (err == -ETIMEDOUT) {
 877                mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
 878                               mlx5_command_str(msg_to_opcode(ent->in)),
 879                               msg_to_opcode(ent->in));
 880        }
 881        mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
 882                      err, deliv_status_to_str(ent->status), ent->status);
 883
 884        return err;
 885}
 886
/*  Notes:
 *    1. Callback functions may not sleep
 *    2. page queue commands do not support asynchronous completion
 *
 * Core command dispatch: wraps the in/out messages in a work entry, hands
 * it to the command workqueue (or runs it inline for page requests) and,
 * for synchronous commands, waits for completion and records statistics.
 * Returns 0 on success; *status carries the firmware delivery status.
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   u8 token)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	/* note 2 above: page queue commands must be synchronous */
	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->token = token;

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		/* run inline rather than queueing: page requests may be
		 * needed to make forward progress
		 */
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;	/* async: completion path frees the entry */

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out_free;

	/* record per-opcode firmware execution time */
	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	free_cmd(ent);
out:
	return err;
}
 953
 954static ssize_t dbg_write(struct file *filp, const char __user *buf,
 955                         size_t count, loff_t *pos)
 956{
 957        struct mlx5_core_dev *dev = filp->private_data;
 958        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
 959        char lbuf[3];
 960        int err;
 961
 962        if (!dbg->in_msg || !dbg->out_msg)
 963                return -ENOMEM;
 964
 965        if (copy_from_user(lbuf, buf, sizeof(lbuf)))
 966                return -EFAULT;
 967
 968        lbuf[sizeof(lbuf) - 1] = 0;
 969
 970        if (strcmp(lbuf, "go"))
 971                return -EINVAL;
 972
 973        err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);
 974
 975        return err ? err : count;
 976}
 977
 978
/* debugfs "run" file: write-only trigger that executes the staged command */
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
 984
 985static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
 986                            u8 token)
 987{
 988        struct mlx5_cmd_prot_block *block;
 989        struct mlx5_cmd_mailbox *next;
 990        int copy;
 991
 992        if (!to || !from)
 993                return -ENOMEM;
 994
 995        copy = min_t(int, size, sizeof(to->first.data));
 996        memcpy(to->first.data, from, copy);
 997        size -= copy;
 998        from += copy;
 999
1000        next = to->next;
1001        while (size) {
1002                if (!next) {
1003                        /* this is a BUG */
1004                        return -ENOMEM;
1005                }
1006
1007                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
1008                block = next->buf;
1009                memcpy(block->data, from, copy);
1010                from += copy;
1011                size -= copy;
1012                block->token = token;
1013                next = next->next;
1014        }
1015
1016        return 0;
1017}
1018
1019static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
1020{
1021        struct mlx5_cmd_prot_block *block;
1022        struct mlx5_cmd_mailbox *next;
1023        int copy;
1024
1025        if (!to || !from)
1026                return -ENOMEM;
1027
1028        copy = min_t(int, size, sizeof(from->first.data));
1029        memcpy(to, from->first.data, copy);
1030        size -= copy;
1031        to += copy;
1032
1033        next = from->next;
1034        while (size) {
1035                if (!next) {
1036                        /* this is a BUG */
1037                        return -ENOMEM;
1038                }
1039
1040                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
1041                block = next->buf;
1042
1043                memcpy(to, block->data, copy);
1044                to += copy;
1045                size -= copy;
1046                next = next->next;
1047        }
1048
1049        return 0;
1050}
1051
1052static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
1053                                              gfp_t flags)
1054{
1055        struct mlx5_cmd_mailbox *mailbox;
1056
1057        mailbox = kmalloc(sizeof(*mailbox), flags);
1058        if (!mailbox)
1059                return ERR_PTR(-ENOMEM);
1060
1061        mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
1062                                      &mailbox->dma);
1063        if (!mailbox->buf) {
1064                mlx5_core_dbg(dev, "failed allocation\n");
1065                kfree(mailbox);
1066                return ERR_PTR(-ENOMEM);
1067        }
1068        memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
1069        mailbox->next = NULL;
1070
1071        return mailbox;
1072}
1073
/* Return a mailbox's DMA buffer to the command pool and free its metadata. */
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
1080
/* Allocate a command message able to carry @size bytes: the inline
 * first-data area plus as many chained mailboxes as needed.  Blocks are
 * prepended head-first, so block numbers are assigned in descending order
 * and the finished chain reads 0..n-1 from the head.
 * Returns the message or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;	/* bytes that do not fit in the inline area */
	int err;
	int n;		/* number of mailbox blocks needed */
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		/* prepend: the new block points (via DMA address) at the
		 * previous head and takes the next-lower block number
		 */
		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	/* unwind any blocks already chained */
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
1129
1130static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
1131                                  struct mlx5_cmd_msg *msg)
1132{
1133        struct mlx5_cmd_mailbox *head = msg->next;
1134        struct mlx5_cmd_mailbox *next;
1135
1136        while (head) {
1137                next = head->next;
1138                free_cmd_box(dev, head);
1139                head = next;
1140        }
1141        kfree(msg);
1142}
1143
1144static ssize_t data_write(struct file *filp, const char __user *buf,
1145                          size_t count, loff_t *pos)
1146{
1147        struct mlx5_core_dev *dev = filp->private_data;
1148        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1149        void *ptr;
1150
1151        if (*pos != 0)
1152                return -EINVAL;
1153
1154        kfree(dbg->in_msg);
1155        dbg->in_msg = NULL;
1156        dbg->inlen = 0;
1157        ptr = memdup_user(buf, count);
1158        if (IS_ERR(ptr))
1159                return PTR_ERR(ptr);
1160        dbg->in_msg = ptr;
1161        dbg->inlen = count;
1162
1163        *pos = count;
1164
1165        return count;
1166}
1167
1168static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
1169                         loff_t *pos)
1170{
1171        struct mlx5_core_dev *dev = filp->private_data;
1172        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1173        int copy;
1174
1175        if (*pos)
1176                return 0;
1177
1178        if (!dbg->out_msg)
1179                return -ENOMEM;
1180
1181        copy = min_t(int, count, dbg->outlen);
1182        if (copy_to_user(buf, dbg->out_msg, copy))
1183                return -EFAULT;
1184
1185        *pos += copy;
1186
1187        return copy;
1188}
1189
/* debugfs "in"/"out" file ops: stage command input / read back output */
static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};
1196
1197static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
1198                           loff_t *pos)
1199{
1200        struct mlx5_core_dev *dev = filp->private_data;
1201        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1202        char outlen[8];
1203        int err;
1204
1205        if (*pos)
1206                return 0;
1207
1208        err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
1209        if (err < 0)
1210                return err;
1211
1212        if (copy_to_user(buf, &outlen, err))
1213                return -EFAULT;
1214
1215        *pos += err;
1216
1217        return err;
1218}
1219
1220static ssize_t outlen_write(struct file *filp, const char __user *buf,
1221                            size_t count, loff_t *pos)
1222{
1223        struct mlx5_core_dev *dev = filp->private_data;
1224        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
1225        char outlen_str[8];
1226        int outlen;
1227        void *ptr;
1228        int err;
1229
1230        if (*pos != 0 || count > 6)
1231                return -EINVAL;
1232
1233        kfree(dbg->out_msg);
1234        dbg->out_msg = NULL;
1235        dbg->outlen = 0;
1236
1237        if (copy_from_user(outlen_str, buf, count))
1238                return -EFAULT;
1239
1240        outlen_str[7] = 0;
1241
1242        err = sscanf(outlen_str, "%d", &outlen);
1243        if (err < 0)
1244                return err;
1245
1246        ptr = kzalloc(outlen, GFP_KERNEL);
1247        if (!ptr)
1248                return -ENOMEM;
1249
1250        dbg->out_msg = ptr;
1251        dbg->outlen = outlen;
1252
1253        *pos = count;
1254
1255        return count;
1256}
1257
/* debugfs "out_len" file ops: configure/query the staged output length */
static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};
1264
/* Build the per-device command workqueue name: "mlx5_cmd_<pci device name>". */
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}
1272
/* Tear down the command-interface debugfs tree created by
 * create_debugfs_files(); no-op when debugfs support is absent.
 */
static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}
1283
/* Create the command-interface debugfs tree under <dbg_root>/cmd:
 *   in       - stage raw command input
 *   out      - read back command output
 *   out_len  - expected output length (read/write)
 *   status   - last delivery status byte
 *   run      - write "go" to execute the staged command
 * Returns 0 on success (or when debugfs is unavailable), -ENOMEM otherwise;
 * on partial failure everything already created is removed.
 */
static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}
1328
/* Switch the command interface between polling and event-driven completion.
 *
 * Acquiring every regular-command semaphore slot plus the page-queue slot
 * guarantees no command is in flight while the mode field is changed.
 */
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	/* drain: hold all slots so nothing can be executing */
	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
1344
/* Report command completions via EQ events instead of polling. */
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}
1349
/* Detect command completions by polling the descriptor ownership bit. */
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
1354
1355static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
1356{
1357        unsigned long flags;
1358
1359        if (msg->cache) {
1360                spin_lock_irqsave(&msg->cache->lock, flags);
1361                list_add_tail(&msg->list, &msg->cache->head);
1362                spin_unlock_irqrestore(&msg->cache->lock, flags);
1363        } else {
1364                mlx5_free_cmd_msg(dev, msg);
1365        }
1366}
1367
/* Command completion handler.
 *
 * @vec: bitmask of completed command-queue entries (bit i = entry i); it
 *	 may also carry MLX5_TRIGGERED_CMD_COMP to mark driver-forced
 *	 completions (timeout / internal error), which override the entry
 *	 status with MLX5_DRIVER_STATUS_ABORTED.
 *
 * For async commands it records statistics, releases the in/out messages
 * and invokes the user callback; for sync commands it completes the
 * waiter in wait_func().  In both cases the queue slot and its semaphore
 * are released.
 */
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->callback)
				cancel_delayed_work(&ent->cb_timeout_work);
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			/* copy the inline output before the slot is recycled */
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);

			if (ent->callback) {
				/* per-opcode firmware execution-time stats */
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err) {
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

					err = err ? err : mlx5_cmd_check(dev,
									ent->in->first.data,
									ent->uout);
				}

				/* async path owns the messages: release them here */
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				/* ent must not be touched after this point */
				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
1449
/* Map a non-zero firmware status to a generic driver error code. */
static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}
1454
/* Get an input message for a command of @in_size bytes.
 *
 * Mid-size and large requests are served from the pre-allocated message
 * caches when possible (the cached message is over-sized, so ->len is
 * rewritten to the real request size); small requests, or a cache miss,
 * fall back to a fresh allocation.
 */
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	/* cache miss (or uncacheable size): allocate from scratch */
	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);

	return msg;
}
1485
1486static int is_manage_pages(void *in)
1487{
1488        return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
1489}
1490
/* Common implementation for synchronous (callback == NULL) and async
 * command execution: builds the input/output messages, invokes the command
 * and, in the sync case, copies the result to @out and releases the
 * messages.  For async commands the completion handler owns and frees
 * the messages.
 */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u8 token;

	/* device unusable: synthesize an error response instead of
	 * touching the command interface
	 */
	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);

		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);
	/* callbacks may run from atomic context (see mlx5_cmd_invoke notes) */
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	/* async commands: the completion handler frees outb/inb instead */
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}
1559
1560int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
1561                  int out_size)
1562{
1563        int err;
1564
1565        err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
1566        return err ? : mlx5_cmd_check(dev, in, out);
1567}
1568EXPORT_SYMBOL(mlx5_cmd_exec);
1569
/* Execute a firmware command asynchronously: @callback runs from the
 * completion path (it may not sleep) with @context and the command result.
 * The completion handler runs mlx5_cmd_check() on the output before
 * invoking the callback.
 */
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
1577
1578static void destroy_msg_cache(struct mlx5_core_dev *dev)
1579{
1580        struct mlx5_cmd *cmd = &dev->cmd;
1581        struct mlx5_cmd_msg *msg;
1582        struct mlx5_cmd_msg *n;
1583
1584        list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
1585                list_del(&msg->list);
1586                mlx5_free_cmd_msg(dev, msg);
1587        }
1588
1589        list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
1590                list_del(&msg->list);
1591                mlx5_free_cmd_msg(dev, msg);
1592        }
1593}
1594
/* Pre-allocate the medium and large command-message caches that alloc_msg()
 * draws from, avoiding mailbox-chain allocation on the command hot path.
 * Returns 0 on success; on failure everything already cached is freed.
 */
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		/* msg->cache tells free_msg() where to return this message */
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}
1633
/* Allocate the 4K-aligned DMA page backing the command queue.
 *
 * If the first single-page allocation happens to be aligned it is used
 * directly; otherwise it is released and an over-sized (2 pages - 1)
 * buffer is allocated so an aligned window is guaranteed to fit inside it.
 */
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* use the aligned window inside the over-sized buffer */
	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}
1664
/* Release the DMA buffer allocated by alloc_cmd_page() (alloc_size records
 * whether the single-page or the over-sized variant was used).
 */
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}
1672
/* Bring up the command interface used to talk to the device firmware.
 *
 * Steps, in order:
 *   - verify the driver's command-interface revision matches firmware;
 *   - create a PCI pool for mailbox blocks (mlx5_cmd_prot_block);
 *   - allocate the 4K-aligned command queue page;
 *   - read the queue geometry (log size/stride) from the init segment
 *     and sanity-check it;
 *   - program the queue's DMA address into the device registers;
 *   - start in polling mode, then create the message cache, the command
 *     workqueue and the debugfs files.
 *
 * Returns 0 on success or a negative errno; on error every resource
 * acquired so far is unwound via the goto ladder at the bottom.
 */
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	/* Mailbox blocks are pooled at their natural power-of-two alignment. */
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	/* Refuse to drive firmware speaking a different cmdif revision. */
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	/* Low byte of cmdq_addr_l_sz packs the queue geometry:
	 * bits [7:4] = log2(number of entries), bits [3:0] = log2(stride).
	 */
	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	/* entries * stride must fit inside the single queue page. */
	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	/* One entry is held back from regular commands; bitmask tracks the
	 * free slots among the remaining max_reg_cmds entries.
	 */
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	/* Firmware may speak an older cmdif than ours (backward compatible),
	 * but a newer one than we know is unsupported.
	 */
	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	/* sem throttles regular commands to the available queue entries;
	 * pages_sem serializes page-request commands (single slot).
	 */
	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	/* The device requires a 4K-aligned queue address; alloc_cmd_page()
	 * should have guaranteed this, so this is a defensive check.
	 */
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	/* Program the queue address: high word first, then the low word
	 * (which shares a register with the size field read above).
	 */
	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	/* Start polling for completions; event mode may be enabled later. */
	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	/* NOTE(review): the specific error from create_debugfs_files() is
	 * deliberately collapsed to -ENOMEM here.
	 */
	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);
1791
1792void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
1793{
1794        struct mlx5_cmd *cmd = &dev->cmd;
1795
1796        clean_debug_files(dev);
1797        destroy_workqueue(cmd->wq);
1798        destroy_msg_cache(dev);
1799        free_cmd_page(dev, cmd);
1800        pci_pool_destroy(cmd->pool);
1801}
1802EXPORT_SYMBOL(mlx5_cmd_cleanup);
1803