linux/drivers/net/ethernet/mellanox/mlx5/core/cq.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"

/* Time budget (in milliseconds) for one mlx5_cq_tasklet_cb() run */
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)

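/* Tasklet entry point for deferred CQ completion processing.  Queued CQs
 * are spliced onto a private list under the lock, then each handler runs
 * without it.  If the TASKLET_MAX_TIME budget expires before the list
 * drains, the tasklet reschedules itself to finish the remainder later.
 */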
void mlx5_cq_tasklet_cb(unsigned long data)
{
        unsigned long flags;
        unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
        struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
        struct mlx5_core_cq *mcq;
        struct mlx5_core_cq *temp;

        spin_lock_irqsave(&ctx->lock, flags);
        list_splice_tail_init(&ctx->list, &ctx->process_list);
        spin_unlock_irqrestore(&ctx->lock, flags);

        list_for_each_entry_safe(mcq, temp, &ctx->process_list,
                                 tasklet_ctx.list) {
                list_del_init(&mcq->tasklet_ctx.list);
                mcq->tasklet_ctx.comp(mcq);
                if (atomic_dec_and_test(&mcq->refcount))
                        complete(&mcq->free);
                if (time_after(jiffies, end))
                        break;
        }

        if (!list_empty(&ctx->process_list))
                tasklet_schedule(&ctx->task);
}

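/* Default cq->comp handler, installed by mlx5_core_create_cq() when the
 * caller does not supply one: queue the CQ on its EQ's tasklet list,
 * taking a reference that mlx5_cq_tasklet_cb() drops after the real
 * completion callback has run.  list_empty_careful() keeps a CQ from
 * being queued twice before the tasklet gets to it.
 */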
static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
{
        unsigned long flags;
        struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;

        spin_lock_irqsave(&tasklet_ctx->lock, flags);
        /* If migrating CQs between EQs is ever implemented, this point
         * must be synchronized: while a CQ is being migrated,
         * completions could still arrive on the old EQ.
         */
        if (list_empty_careful(&cq->tasklet_ctx.list)) {
                atomic_inc(&cq->refcount);
                list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
        }
        spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}

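/* Dispatch a hardware completion event to the CQ's comp handler.  The
 * lookup takes a reference under the table lock so the CQ cannot be
 * freed while the handler runs; arm_sn is bumped so the next arm
 * doorbell carries the updated sequence number.
 */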
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
        struct mlx5_core_cq *cq;
        struct mlx5_cq_table *table = &dev->priv.cq_table;

        spin_lock(&table->lock);
        cq = radix_tree_lookup(&table->tree, cqn);
        if (likely(cq))
                atomic_inc(&cq->refcount);
        spin_unlock(&table->lock);

        if (!cq) {
                mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
                return;
        }

        ++cq->arm_sn;

        cq->comp(cq);

        if (atomic_dec_and_test(&cq->refcount))
                complete(&cq->free);
}

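/* Dispatch an asynchronous event (e.g. a CQ error) to the CQ's event
 * handler, using the same lookup-and-reference scheme as
 * mlx5_cq_completion() above.
 */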
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
        struct mlx5_cq_table *table = &dev->priv.cq_table;
        struct mlx5_core_cq *cq;

        spin_lock(&table->lock);

        cq = radix_tree_lookup(&table->tree, cqn);
        if (cq)
                atomic_inc(&cq->refcount);

        spin_unlock(&table->lock);

        if (!cq) {
                mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
                return;
        }

        cq->event(cq, event_type);

        if (atomic_dec_and_test(&cq->refcount))
                complete(&cq->free);
}

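/* Create a CQ via the CREATE_CQ firmware command.  The caller supplies a
 * fully built create_cq_in mailbox; this function fills in the opcode,
 * records the cqn returned by firmware, installs the tasklet-based
 * completion handler when the caller has not set one, and publishes the
 * CQ in the radix tree so events can be dispatched to it.
 *
 * Illustrative caller sketch, not code from this file ("priv", "log_sz"
 * and the eqn lookup are assumptions borrowed from typical users):
 *
 *      inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 *              sizeof(u64) * priv->buf.npages;
 *      in = mlx5_vzalloc(inlen);
 *      cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 *      MLX5_SET(cqc, cqc, log_cq_size, log_sz);
 *      MLX5_SET(cqc, cqc, c_eqn, eqn);
 *      err = mlx5_core_create_cq(dev, &priv->mcq, in, inlen);
 *      kvfree(in);
 */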
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                        u32 *in, int inlen)
{
        struct mlx5_cq_table *table = &dev->priv.cq_table;
        u32 out[MLX5_ST_SZ_DW(create_cq_out)];
        u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
        u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
        int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
                           c_eqn);
        struct mlx5_eq *eq;
        int err;

        eq = mlx5_eqn2eq(dev, eqn);
        if (IS_ERR(eq))
                return PTR_ERR(eq);

        memset(out, 0, sizeof(out));
        MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                return err;

        cq->cqn = MLX5_GET(create_cq_out, out, cqn);
        cq->cons_index = 0;
        cq->arm_sn     = 0;
        atomic_set(&cq->refcount, 1);
        init_completion(&cq->free);
        if (!cq->comp)
                cq->comp = mlx5_add_cq_to_tasklet;
        /* assuming CQ will be deleted before the EQ */
        cq->tasklet_ctx.priv = &eq->tasklet_ctx;
        INIT_LIST_HEAD(&cq->tasklet_ctx.list);

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, cq->cqn, cq);
        spin_unlock_irq(&table->lock);
        if (err)
                goto err_cmd;

        cq->pid = current->pid;
        err = mlx5_debug_cq_add(dev, cq);
        if (err)
                mlx5_core_dbg(dev, "failed adding CQ 0x%x to debug file system\n",
                              cq->cqn);

        cq->uar = dev->priv.uar;

        return 0;

err_cmd:
        memset(din, 0, sizeof(din));
        memset(dout, 0, sizeof(dout));
        MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
        MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
        mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
        return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);

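/* Tear down a CQ: unpublish it from the radix tree so no new events can
 * find it, issue DESTROY_CQ, wait via synchronize_irq() for any handler
 * still running on the CQ's IRQ, then drop the initial reference and
 * block until every concurrent user has dropped theirs.  After this
 * returns the caller may free the CQ's memory safely.
 */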
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
        struct mlx5_cq_table *table = &dev->priv.cq_table;
        u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
        struct mlx5_core_cq *tmp;
        int err;

        spin_lock_irq(&table->lock);
        tmp = radix_tree_delete(&table->tree, cq->cqn);
        spin_unlock_irq(&table->lock);
        if (!tmp) {
                mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
                return -EINVAL;
        }
        if (tmp != cq) {
                mlx5_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn);
                return -EINVAL;
        }

        MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
        MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
        if (err)
                return err;

        synchronize_irq(cq->irqn);

        mlx5_debug_cq_remove(dev, cq);
        if (atomic_dec_and_test(&cq->refcount))
                complete(&cq->free);
        wait_for_completion(&cq->free);

        return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

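/* Read the CQ context back from firmware via QUERY_CQ; "out" should be
 * sized for a query_cq_out layout (MLX5_ST_SZ_BYTES(query_cq_out)).
 */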
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                       u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};

        MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
        MLX5_SET(query_cq_in, in, cqn, cq->cqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_cq);

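/* Issue MODIFY_CQ with a caller-built mailbox.  Only the opcode is set
 * here; the caller selects which context fields change via the
 * modify_field_select bits (see mlx5_core_modify_cq_moderation() below
 * for one example).
 */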
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                        u32 *in, int inlen)
{
        u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};

        MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

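/* Convenience wrapper around mlx5_core_modify_cq() that updates only the
 * event moderation parameters: cq_period (time to wait before raising an
 * event, in usec granularity) and cq_max_count (number of completions to
 * coalesce per event).
 *
 * Illustrative use from an ethtool coalescing path (the "coal" names are
 * assumptions, not code from this file):
 *
 *      mlx5_core_modify_cq_moderation(dev, &priv->mcq,
 *                                     coal->rx_coalesce_usecs,
 *                                     coal->rx_max_coalesced_frames);
 */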
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
                                   struct mlx5_core_cq *cq,
                                   u16 cq_period,
                                   u16 cq_max_count)
{
        u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
        void *cqc;

        MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
        cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, cq_period, cq_period);
        MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
        MLX5_SET(modify_cq_in, in,
                 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
                 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);

        return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);

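/* Initialize the per-device CQ table: the radix tree that maps a cqn to
 * its mlx5_core_cq, the lock protecting it, and the CQ debugfs root.
 * mlx5_cleanup_cq_table() below undoes the debugfs part.
 */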
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
        struct mlx5_cq_table *table = &dev->priv.cq_table;
        int err;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
        err = mlx5_cq_debugfs_init(dev);

        return err;
}

void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
        mlx5_cq_debugfs_cleanup(dev);
}