linux/drivers/net/ethernet/mellanox/mlx4/srq.c
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/srq.h>
#include <linux/export.h>
#include <linux/gfp.h>

#include "mlx4.h"
#include "icm.h"

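/*
 * Dispatch an asynchronous event on SRQ @srqn to its event handler.
 * A reference is taken under the table lock so the SRQ cannot be
 * destroyed while the handler runs; dropping the last reference
 * completes srq->free, which mlx4_srq_free() waits on.
 */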
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;

	spin_lock(&srq_table->lock);

	srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&srq_table->lock);

	if (!srq) {
		mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

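/*
 * Thin wrappers around the firmware commands that manage SRQ state:
 * SW2HW_SRQ hands a software-built context to the HCA, HW2SW_SRQ
 * takes ownership back (a NULL mailbox sets the op modifier that
 * tells the firmware not to return the context), ARM_SRQ sets the
 * limit watermark and QUERY_SRQ reads the context into a mailbox.
 */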
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd(dev, mailbox->dma, srq_num, 0,
			MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
	return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int srq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

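/*
 * Reserve an SRQ number and make sure the ICM pages backing its
 * context entry and its entry in the cMPT table are mapped.  The
 * __mlx4_ variant manipulates the tables directly and may only run
 * on the resource owner; mlx4_srq_alloc_icm() below forwards the
 * request of a multi-function slave to the master via ALLOC_RES.
 */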
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	*srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
	if (*srqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &srq_table->table, *srqn);

err_out:
	mlx4_bitmap_free(&srq_table->bitmap, *srqn, MLX4_NO_RR);
	return err;
}

static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*srqn = get_param_l(&out_param);

		return err;
	}
	return __mlx4_srq_alloc_icm(dev, srqn);
}

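/*
 * Undo __mlx4_srq_alloc_icm(): release the ICM references and return
 * the SRQ number to the bitmap.  As with allocation, slaves go
 * through the FREE_RES command instead of touching the tables.
 */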
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;

	mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
	mlx4_table_put(dev, &srq_table->table, srqn);
	mlx4_bitmap_free(&srq_table->bitmap, srqn, MLX4_NO_RR);
}

static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, srqn);
		if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed freeing srq:%d\n", srqn);
		return;
	}
	__mlx4_srq_free_icm(dev, srqn);
}

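/*
 * Create a SRQ: reserve the number and ICM backing, insert the SRQ
 * into the radix tree used by mlx4_srq_event(), then build the
 * hardware context and hand it over with SW2HW_SRQ.  Each failure
 * path unwinds the steps already taken, in reverse order.
 *
 * A minimal caller sketch (mlx4_ib-style; names are illustrative):
 *
 *	err = mlx4_srq_alloc(dev, pdn, cqn, xrcdn, &mtt, db.dma, &msrq);
 *	if (err)
 *		goto err_mtt;
 *	msrq.event = my_srq_event;	// async event callback
 */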
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
		   struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	u64 mtt_addr;
	int err;

	err = mlx4_srq_alloc_icm(dev, &srq->srqn);
	if (err)
		return err;

	spin_lock_irq(&srq_table->lock);
	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
	spin_unlock_irq(&srq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	srq_context = mailbox->buf;
	srq_context->state_logsize_srqn = cpu_to_be32((ilog2(srq->max) << 24) |
						      srq->srqn);
	srq_context->logstride          = srq->wqe_shift - 4;
	srq_context->xrcd               = cpu_to_be16(xrcd);
	srq_context->pg_offset_cqn      = cpu_to_be32(cqn & 0xffffff);
	srq_context->log_page_size      = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	srq_context->mtt_base_addr_h    = mtt_addr >> 32;
	srq_context->mtt_base_addr_l    = cpu_to_be32(mtt_addr & 0xffffffff);
	srq_context->pd                 = cpu_to_be32(pdn);
	srq_context->db_rec_addr        = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return 0;

err_radix:
	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

err_icm:
	mlx4_srq_free_icm(dev, srq->srqn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);

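/*
 * Destroy a SRQ: return ownership of the context to software, remove
 * the SRQ from the radix tree so no new references can be taken, then
 * drop our own reference and wait until any event handler still
 * holding one has finished before releasing the ICM backing.
 */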
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
	if (err)
		mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n", err, srq->srqn);

	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	mlx4_srq_free_icm(dev, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);

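/* Re-arm the SRQ limit event with a new watermark. */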
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark)
{
	return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
}
EXPORT_SYMBOL_GPL(mlx4_srq_arm);

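/*
 * Read the SRQ context back from the HCA and extract the current
 * limit watermark.
 */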
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	srq_context = mailbox->buf;

	err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
	if (err)
		goto err_out;
	*limit_watermark = be16_to_cpu(srq_context->limit_watermark);

err_out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_query);

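/*
 * Set up the per-device SRQ table.  Slaves only need the lock and the
 * radix tree; the SRQ number bitmap is owned by the master (or by a
 * single-function device).
 */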
int mlx4_init_srq_table(struct mlx4_dev *dev)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	spin_lock_init(&srq_table->lock);
	INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
			       dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
	if (err)
		return err;

	return 0;
}

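/* Tear down the SRQ table; only the bitmap owner has anything to free. */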
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}

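/*
 * Look up a SRQ by number.  Unlike mlx4_srq_event(), this takes no
 * reference: the caller must guarantee the SRQ stays alive.
 */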
struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&srq_table->lock, flags);
	srq = radix_tree_lookup(&srq_table->tree,
				srqn & (dev->caps.num_srqs - 1));
	spin_unlock_irqrestore(&srq_table->lock, flags);

	return srq;
}
EXPORT_SYMBOL_GPL(mlx4_srq_lookup);