linux/drivers/net/mlx4/cmd.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>

#include <asm/io.h>

#include "mlx4.h"

#define CMD_POLL_TOKEN 0xffff

enum {
        /* command completed successfully: */
        CMD_STAT_OK             = 0x00,
        /* Internal error (such as a bus error) occurred while processing command: */
        CMD_STAT_INTERNAL_ERR   = 0x01,
        /* Operation/command not supported or opcode modifier not supported: */
        CMD_STAT_BAD_OP         = 0x02,
        /* Parameter not supported or parameter out of range: */
        CMD_STAT_BAD_PARAM      = 0x03,
        /* System not enabled or bad system state: */
        CMD_STAT_BAD_SYS_STATE  = 0x04,
        /* Attempt to access reserved or unallocated resource: */
        CMD_STAT_BAD_RESOURCE   = 0x05,
        /* Requested resource is currently executing a command, or is otherwise busy: */
        CMD_STAT_RESOURCE_BUSY  = 0x06,
        /* Required capability exceeds device limits: */
        CMD_STAT_EXCEED_LIM     = 0x08,
        /* Resource is not in the appropriate state or ownership: */
        CMD_STAT_BAD_RES_STATE  = 0x09,
        /* Index out of range: */
        CMD_STAT_BAD_INDEX      = 0x0a,
        /* FW image corrupted: */
        CMD_STAT_BAD_NVMEM      = 0x0b,
        /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
        CMD_STAT_ICM_ERROR      = 0x0c,
        /* Attempt to modify a QP/EE which is not in the presumed state: */
        CMD_STAT_BAD_QP_STATE   = 0x10,
        /* Bad segment parameters (Address/Size): */
        CMD_STAT_BAD_SEG_PARAM  = 0x20,
        /* Memory Region has Memory Windows bound to it: */
        CMD_STAT_REG_BOUND      = 0x21,
        /* HCA local attached memory not present: */
        CMD_STAT_LAM_NOT_PRE    = 0x22,
        /* Bad management packet (silently discarded): */
        CMD_STAT_BAD_PKT        = 0x30,
        /* More outstanding CQEs in CQ than new CQ size: */
        CMD_STAT_BAD_SIZE       = 0x40,
        /* Multi Function device support required: */
        CMD_STAT_MULTI_FUNC_REQ = 0x50,
};

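/*
 * Byte offsets of the HCR fields, plus bit positions within the
 * status/control word at offset 0x18 (opcode modifier shift, toggle,
 * event-request and go bits).
 */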
enum {
        HCR_IN_PARAM_OFFSET     = 0x00,
        HCR_IN_MODIFIER_OFFSET  = 0x08,
        HCR_OUT_PARAM_OFFSET    = 0x0c,
        HCR_TOKEN_OFFSET        = 0x14,
        HCR_STATUS_OFFSET       = 0x18,

        HCR_OPMOD_SHIFT         = 12,
        HCR_T_BIT               = 21,
        HCR_E_BIT               = 22,
        HCR_GO_BIT              = 23
};

enum {
        GO_BIT_TIMEOUT_MSECS    = 10000
};

struct mlx4_cmd_context {
        struct completion       done;
        int                     result;
        int                     next;
        u64                     out_param;
        u16                     token;
};

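/*
 * Translate a firmware command status into an errno value (0 on
 * success); statuses without a mapping are reported as -EIO.
 */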
static int mlx4_status_to_errno(u8 status)
{
        static const int trans_table[] = {
                [CMD_STAT_INTERNAL_ERR]   = -EIO,
                [CMD_STAT_BAD_OP]         = -EPERM,
                [CMD_STAT_BAD_PARAM]      = -EINVAL,
                [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
                [CMD_STAT_BAD_RESOURCE]   = -EBADF,
                [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
                [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
                [CMD_STAT_BAD_RES_STATE]  = -EBADF,
                [CMD_STAT_BAD_INDEX]      = -EBADF,
                [CMD_STAT_BAD_NVMEM]      = -EFAULT,
                [CMD_STAT_ICM_ERROR]      = -ENFILE,
                [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
                [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
                [CMD_STAT_REG_BOUND]      = -EBUSY,
                [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
                [CMD_STAT_BAD_PKT]        = -EINVAL,
                [CMD_STAT_BAD_SIZE]       = -ENOMEM,
                [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
        };

        if (status >= ARRAY_SIZE(trans_table) ||
            (status != CMD_STAT_OK && trans_table[status] == 0))
                return -EIO;

        return trans_table[status];
}

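/*
 * Nonzero while the HCR is still busy with the previous command: the
 * go bit is set, or the hardware toggle bit still matches the value
 * the driver will use for its next command.
 */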
static int cmd_pending(struct mlx4_dev *dev)
{
        u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

        return (status & swab32(1 << HCR_GO_BIT)) ||
                (mlx4_priv(dev)->cmd.toggle ==
                 !!(status & swab32(1 << HCR_T_BIT)));
}

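/*
 * Post one command to the HCR: wait (bounded by GO_BIT_TIMEOUT_MSECS
 * for event-driven commands) for the register to become free, write
 * the parameter words and then the control word with the go bit set.
 * Returns -EAGAIN if the HCR never became available.
 */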
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                         u32 in_modifier, u8 op_modifier, u16 op, u16 token,
                         int event)
{
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        u32 __iomem *hcr = cmd->hcr;
        int ret = -EAGAIN;
        unsigned long end;

        mutex_lock(&cmd->hcr_mutex);

        end = jiffies;
        if (event)
                end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

        while (cmd_pending(dev)) {
                if (time_after_eq(jiffies, end))
                        goto out;
                cond_resched();
        }

        /*
         * We use writel (instead of something like memcpy_toio)
         * because writes of less than 32 bits to the HCR don't work
         * (and some architectures such as ia64 implement memcpy_toio
         * in terms of writeb).
         */
        __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
        __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
        __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
        __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
        __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
        __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);

        /* __raw_writel may not order writes. */
        wmb();

        __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
                                               (cmd->toggle << HCR_T_BIT)       |
                                               (event ? (1 << HCR_E_BIT) : 0)   |
                                               (op_modifier << HCR_OPMOD_SHIFT) |
                                               op),                       hcr + 6);

        /*
         * Make sure that our HCR writes don't get mixed in with
         * writes from another CPU starting a FW command.
         */
        mmiowb();

        cmd->toggle = cmd->toggle ^ 1;

        ret = 0;

out:
        mutex_unlock(&cmd->hcr_mutex);
        return ret;
}

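/*
 * Issue a command and busy-wait (with cond_resched()) on the HCR until
 * the firmware clears the go bit, then read the status and any
 * immediate output directly from the register.  Used while
 * command-completion events are not available.
 */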
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        void __iomem *hcr = priv->cmd.hcr;
        int err = 0;
        unsigned long end;

        down(&priv->cmd.poll_sem);

        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
        if (err)
                goto out;

        end = msecs_to_jiffies(timeout) + jiffies;
        while (cmd_pending(dev) && time_before(jiffies, end))
                cond_resched();

        if (cmd_pending(dev)) {
                err = -ETIMEDOUT;
                goto out;
        }

        if (out_is_imm)
                *out_param =
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));

        err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
                                               __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);

out:
        up(&priv->cmd.poll_sem);
        return err;
}

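/*
 * Completion handler for event-driven commands: called when a command
 * completion event arrives, it looks up the waiting context by token
 * and wakes the caller sleeping in mlx4_cmd_wait().
 */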
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_context *context =
                &priv->cmd.context[token & priv->cmd.token_mask];

        /* previously timed out command completing at long last */
        if (token != context->token)
                return;

        context->result    = mlx4_status_to_errno(status);
        context->out_param = out_param;

        complete(&context->done);
}

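/*
 * Issue a command using a completion event: grab a free command
 * context (bumping its token so stale completions are ignored), post
 * the command with that token, and sleep until mlx4_cmd_event()
 * completes the context or the timeout expires.
 */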
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
{
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        struct mlx4_cmd_context *context;
        int err = 0;

        down(&cmd->event_sem);

        spin_lock(&cmd->context_lock);
        BUG_ON(cmd->free_head < 0);
        context = &cmd->context[cmd->free_head];
        context->token += cmd->token_mask + 1;
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);

        init_completion(&context->done);

        mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                      in_modifier, op_modifier, op, context->token, 1);

        if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
                err = -EBUSY;
                goto out;
        }

        err = context->result;
        if (err)
                goto out;

        if (out_is_imm)
                *out_param = context->out_param;

out:
        spin_lock(&cmd->context_lock);
        context->next = cmd->free_head;
        cmd->free_head = context - cmd->context;
        spin_unlock(&cmd->context_lock);

        up(&cmd->event_sem);
        return err;
}

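/*
 * Dispatch a firmware command through whichever path is currently
 * enabled (events or polling).  Most callers go through the
 * mlx4_cmd(), mlx4_cmd_box() and mlx4_cmd_imm() wrappers in
 * <linux/mlx4/cmd.h> rather than calling this directly; a rough
 * sketch of such a call (the opcode and values are illustrative
 * only):
 *
 *      err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
 *                     MLX4_CMD_TIME_CLASS_A);
 */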
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
               u16 op, unsigned long timeout)
{
        if (mlx4_priv(dev)->cmd.use_events)
                return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
                                     in_modifier, op_modifier, op, timeout);
        else
                return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
                                     in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);

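/*
 * Map the HCR at MLX4_HCR_BASE in BAR 0 and create the DMA pool used
 * for command mailboxes.  Polling mode is used until
 * mlx4_cmd_use_events() is called.
 */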
int mlx4_cmd_init(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mutex_init(&priv->cmd.hcr_mutex);
        sema_init(&priv->cmd.poll_sem, 1);
        priv->cmd.use_events = 0;
        priv->cmd.toggle     = 1;

        priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
                                MLX4_HCR_SIZE);
        if (!priv->cmd.hcr) {
                mlx4_err(dev, "Couldn't map command register.");
                return -ENOMEM;
        }

        priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
                                         MLX4_MAILBOX_SIZE,
                                         MLX4_MAILBOX_SIZE, 0);
        if (!priv->cmd.pool) {
                iounmap(priv->cmd.hcr);
                return -ENOMEM;
        }

        return 0;
}

void mlx4_cmd_cleanup(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        pci_pool_destroy(priv->cmd.pool);
        iounmap(priv->cmd.hcr);
}

/*
 * Switch to using events to issue FW commands (can only be called
 * after the event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        priv->cmd.context = kmalloc(priv->cmd.max_cmds *
                                   sizeof (struct mlx4_cmd_context),
                                   GFP_KERNEL);
        if (!priv->cmd.context)
                return -ENOMEM;

        for (i = 0; i < priv->cmd.max_cmds; ++i) {
                priv->cmd.context[i].token = i;
                priv->cmd.context[i].next  = i + 1;
        }

        priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
        priv->cmd.free_head = 0;

        sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
        spin_lock_init(&priv->cmd.context_lock);

        for (priv->cmd.token_mask = 1;
             priv->cmd.token_mask < priv->cmd.max_cmds;
             priv->cmd.token_mask <<= 1)
                ; /* nothing */
        --priv->cmd.token_mask;

        priv->cmd.use_events = 1;

        down(&priv->cmd.poll_sem);

        return 0;
}

/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        priv->cmd.use_events = 0;

        for (i = 0; i < priv->cmd.max_cmds; ++i)
                down(&priv->cmd.event_sem);

        kfree(priv->cmd.context);

        up(&priv->cmd.poll_sem);
}

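/*
 * Allocate a DMA-coherent mailbox for commands that take a data
 * buffer.  A typical caller fills mailbox->buf and passes mailbox->dma
 * as the command's input parameter; sketch (the specific command and
 * error handling are illustrative only):
 *
 *      mailbox = mlx4_alloc_cmd_mailbox(dev);
 *      if (IS_ERR(mailbox))
 *              return PTR_ERR(mailbox);
 *      ... fill mailbox->buf ...
 *      err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
 *                     MLX4_CMD_TIME_CLASS_B);
 *      mlx4_free_cmd_mailbox(dev, mailbox);
 */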
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
        struct mlx4_cmd_mailbox *mailbox;

        mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
        if (!mailbox)
                return ERR_PTR(-ENOMEM);

        mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
                                      &mailbox->dma);
        if (!mailbox->buf) {
                kfree(mailbox);
                return ERR_PTR(-ENOMEM);
        }

        return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
{
        if (!mailbox)
                return;

        pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
        kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);