/* linux/drivers/net/ethernet/mellanox/mlx5/core/wq.c */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include "wq.h"
#include "mlx5_core.h"

  37int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
  38                       void *wqc, struct mlx5_wq_cyc *wq,
  39                       struct mlx5_wq_ctrl *wq_ctrl)
  40{
  41        u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
  42        u8 log_wq_sz     = MLX5_GET(wq, wqc, log_wq_sz);
  43        struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
  44        int err;
  45
  46        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
  47        if (err) {
  48                mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
  49                return err;
  50        }
  51
  52        wq->db  = wq_ctrl->db.db;
  53
  54        err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
  55                                       &wq_ctrl->buf, param->buf_numa_node);
  56        if (err) {
  57                mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
  58                goto err_db_free;
  59        }
  60
  61        mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
  62        wq->sz = mlx5_wq_cyc_get_size(wq);
  63
  64        wq_ctrl->mdev = mdev;
  65
  66        return 0;
  67
  68err_db_free:
  69        mlx5_db_free(mdev, &wq_ctrl->db);
  70
  71        return err;
  72}
  73
  74void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
  75{
  76        size_t len;
  77        void *wqe;
  78
  79        if (!net_ratelimit())
  80                return;
  81
  82        nstrides = max_t(u8, nstrides, 1);
  83
  84        len = nstrides << wq->fbc.log_stride;
  85        wqe = mlx5_wq_cyc_get_wqe(wq, ix);
  86
  87        pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n",
  88                mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);
  89        print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
  90}
  91
  92void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq)
  93{
  94        wq->wqe_ctr = 0;
  95        wq->cur_sz = 0;
  96        mlx5_wq_cyc_update_db_record(wq);
  97}
  98
  99int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 100                      void *qpc, struct mlx5_wq_qp *wq,
 101                      struct mlx5_wq_ctrl *wq_ctrl)
 102{
 103        u8 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
 104        u8 log_rq_sz     = MLX5_GET(qpc, qpc, log_rq_size);
 105        u8 log_sq_stride = ilog2(MLX5_SEND_WQE_BB);
 106        u8 log_sq_sz     = MLX5_GET(qpc, qpc, log_sq_size);
 107
 108        u32 rq_byte_size;
 109        int err;
 110
 111
 112
 113        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 114        if (err) {
 115                mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
 116                return err;
 117        }
 118
 119        err = mlx5_frag_buf_alloc_node(mdev,
 120                                       wq_get_byte_sz(log_rq_sz, log_rq_stride) +
 121                                       wq_get_byte_sz(log_sq_sz, log_sq_stride),
 122                                       &wq_ctrl->buf, param->buf_numa_node);
 123        if (err) {
 124                mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 125                goto err_db_free;
 126        }
 127
 128        mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);
 129
 130        rq_byte_size = wq_get_byte_sz(log_rq_sz, log_rq_stride);
 131
 132        if (rq_byte_size < PAGE_SIZE) {
 133                /* SQ starts within the same page of the RQ */
 134                u16 sq_strides_offset = rq_byte_size / MLX5_SEND_WQE_BB;
 135
 136                mlx5_init_fbc_offset(wq_ctrl->buf.frags,
 137                                     log_sq_stride, log_sq_sz, sq_strides_offset,
 138                                     &wq->sq.fbc);
 139        } else {
 140                u16 rq_npages = rq_byte_size >> PAGE_SHIFT;
 141
 142                mlx5_init_fbc(wq_ctrl->buf.frags + rq_npages,
 143                              log_sq_stride, log_sq_sz, &wq->sq.fbc);
 144        }
 145
 146        wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
 147        wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];
 148
 149        wq_ctrl->mdev = mdev;
 150
 151        return 0;
 152
 153err_db_free:
 154        mlx5_db_free(mdev, &wq_ctrl->db);
 155
 156        return err;
 157}
 158
 159int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 160                     void *cqc, struct mlx5_cqwq *wq,
 161                     struct mlx5_wq_ctrl *wq_ctrl)
 162{
 163        /* CQE_STRIDE_128 and CQE_STRIDE_128_PAD both mean 128B stride */
 164        u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;
 165        u8 log_wq_sz     = MLX5_GET(cqc, cqc, log_cq_size);
 166        int err;
 167
 168        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 169        if (err) {
 170                mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
 171                return err;
 172        }
 173
 174        wq->db  = wq_ctrl->db.db;
 175
 176        err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
 177                                       &wq_ctrl->buf,
 178                                       param->buf_numa_node);
 179        if (err) {
 180                mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
 181                               err);
 182                goto err_db_free;
 183        }
 184
 185        mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc);
 186
 187        wq_ctrl->mdev = mdev;
 188
 189        return 0;
 190
 191err_db_free:
 192        mlx5_db_free(mdev, &wq_ctrl->db);
 193
 194        return err;
 195}
 196
 197static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq)
 198{
 199        struct mlx5_wqe_srq_next_seg *next_seg;
 200        int i;
 201
 202        for (i = 0; i < wq->fbc.sz_m1; i++) {
 203                next_seg = mlx5_wq_ll_get_wqe(wq, i);
 204                next_seg->next_wqe_index = cpu_to_be16(i + 1);
 205        }
 206        next_seg = mlx5_wq_ll_get_wqe(wq, i);
 207        wq->tail_next = &next_seg->next_wqe_index;
 208}
 209
 210int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 211                      void *wqc, struct mlx5_wq_ll *wq,
 212                      struct mlx5_wq_ctrl *wq_ctrl)
 213{
 214        u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
 215        u8 log_wq_sz     = MLX5_GET(wq, wqc, log_wq_sz);
 216        struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
 217        int err;
 218
 219        err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 220        if (err) {
 221                mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
 222                return err;
 223        }
 224
 225        wq->db  = wq_ctrl->db.db;
 226
 227        err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
 228                                       &wq_ctrl->buf, param->buf_numa_node);
 229        if (err) {
 230                mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
 231                goto err_db_free;
 232        }
 233
 234        mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
 235
 236        mlx5_wq_ll_init_list(wq);
 237        wq_ctrl->mdev = mdev;
 238
 239        return 0;
 240
 241err_db_free:
 242        mlx5_db_free(mdev, &wq_ctrl->db);
 243
 244        return err;
 245}
 246
 247void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq)
 248{
 249        wq->head = 0;
 250        wq->wqe_ctr = 0;
 251        wq->cur_sz = 0;
 252        mlx5_wq_ll_init_list(wq);
 253        mlx5_wq_ll_update_db_record(wq);
 254}
 255
 256void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
 257{
 258        mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
 259        mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
 260}
 261
 262