linux/drivers/net/ethernet/mellanox/mlx5/core/eq.c
/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

enum {
        MLX5_EQE_SIZE           = sizeof(struct mlx5_eqe),
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_EQ_STATE_ARMED             = 0x9,
        MLX5_EQ_STATE_FIRED             = 0xa,
        MLX5_EQ_STATE_ALWAYS_ARMED      = 0xb,
};

enum {
        MLX5_NUM_SPARE_EQE      = 0x80,
        MLX5_NUM_ASYNC_EQE      = 0x100,
        MLX5_NUM_CMD_EQE        = 32,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

struct map_eq_in {
        u64     mask;
        u32     reserved;
        u32     unmap_eqn;
};

struct cre_des_eq {
        u8      reserved[15];
        u8      eqn;
};

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        struct mlx5_destroy_eq_mbox_in in;
        struct mlx5_destroy_eq_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
        in.eqn = eqn;
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                goto ex;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

ex:
        return err;
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

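/* An EQE is owned by software when its owner bit matches the expected
 * polarity for the current pass over the queue.  Since nent is a power
 * of two, the polarity flips on every wrap of the consumer index, so
 * entries left over from the previous pass are never handed back to
 * the caller.
 */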
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
        switch (type) {
        case MLX5_EVENT_TYPE_COMP:
                return "MLX5_EVENT_TYPE_COMP";
        case MLX5_EVENT_TYPE_PATH_MIG:
                return "MLX5_EVENT_TYPE_PATH_MIG";
        case MLX5_EVENT_TYPE_COMM_EST:
                return "MLX5_EVENT_TYPE_COMM_EST";
        case MLX5_EVENT_TYPE_SQ_DRAINED:
                return "MLX5_EVENT_TYPE_SQ_DRAINED";
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
        case MLX5_EVENT_TYPE_CQ_ERROR:
                return "MLX5_EVENT_TYPE_CQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_INTERNAL_ERROR:
                return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                return "MLX5_EVENT_TYPE_PORT_CHANGE";
        case MLX5_EVENT_TYPE_GPIO_EVENT:
                return "MLX5_EVENT_TYPE_GPIO_EVENT";
        case MLX5_EVENT_TYPE_REMOTE_CONFIG:
                return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
        case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
                return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
        case MLX5_EVENT_TYPE_STALL_EVENT:
                return "MLX5_EVENT_TYPE_STALL_EVENT";
        case MLX5_EVENT_TYPE_CMD:
                return "MLX5_EVENT_TYPE_CMD";
        case MLX5_EVENT_TYPE_PAGE_REQUEST:
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        default:
                return "Unrecognized event";
        }
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
        switch (subtype) {
        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                return MLX5_DEV_EVENT_PORT_DOWN;
        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                return MLX5_DEV_EVENT_PORT_UP;
        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                return MLX5_DEV_EVENT_PORT_INITIALIZED;
        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                return MLX5_DEV_EVENT_LID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                return MLX5_DEV_EVENT_PKEY_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                return MLX5_DEV_EVENT_GUID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                return MLX5_DEV_EVENT_CLIENT_REREG;
        }
        return -1;
}

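/* Write the current consumer index to the EQ doorbell.  The doorbell
 * exposes two registers: the first one also re-arms the EQ so the HCA
 * will raise another interrupt on the next event, while the second
 * only updates the consumer index (used mid-poll to avoid an apparent
 * overflow).
 */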
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
        __raw_writel((__force u32) cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

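/* Poll the EQ: consume every software-owned EQE, dispatch it by event
 * type to the CQ/QP/SRQ/command/page-request handlers, then update the
 * consumer index and re-arm the EQ.  Returns nonzero if at least one
 * EQE was handled.
 */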
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int eqes_found = 0;
        int set_ci = 0;
        u32 cqn;
        u32 srqn;
        u8 port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
                switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
                        mlx5_cq_completion(dev, cqn);
                        break;

                case MLX5_EVENT_TYPE_PATH_MIG:
                case MLX5_EVENT_TYPE_COMM_EST:
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mlx5_core_dbg(dev, "event %s(%d) arrived\n",
                                      eqe_type_str(eqe->type), eqe->type);
                        mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff,
                                      eqe->type);
                        break;

                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, srqn);
                        mlx5_srq_event(dev, srqn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_CMD:
                        mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
                        break;

                case MLX5_EVENT_TYPE_PORT_CHANGE:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                                dev->event(dev, port_subtype_event(eqe->sub_type), &port);
                                break;
                        default:
                                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
                                               port, eqe->sub_type);
                        }
                        break;
                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
                        mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                                       cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;

                default:
                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        return eqes_found;
}

static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_core_dev *dev = eq->dev;

        mlx5_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

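/* Mark every entry as hardware-owned so the first pass of
 * next_eqe_sw() does not pick up uninitialized EQEs.
 */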
static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}

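/* Create an EQ and map it to an MSI-X vector: allocate and initialize
 * the EQE buffer, issue CREATE_EQ with the requested event mask, hook
 * the vector's interrupt handler, register the EQ in debugfs and arm
 * it.  nent is rounded up to a power of two after adding
 * MLX5_NUM_SPARE_EQE spare entries.
 */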
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        struct mlx5_create_eq_mbox_in *in;
        struct mlx5_create_eq_mbox_out out;
        int err;
        int inlen;

        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
                             &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }
        memset(&out, 0, sizeof(out));

        mlx5_fill_page_array(&eq->buf, in->pas);

        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
        in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
        in->ctx.intr = vecidx;
        in->ctx.log_page_size = PAGE_SHIFT - 12;
        in->events_mask = cpu_to_be64(mask);

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err)
                goto err_in;

        if (out.hdr.status) {
                err = mlx5_cmd_status_to_err(&out.hdr);
                goto err_in;
        }

        snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));
        eq->eqn = out.eq_number;
        err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
                          eq->name, eq);
        if (err)
                goto err_eq;

        eq->irqn = vecidx;
        eq->dev = dev;
        eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;

        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_irq;

        /* EQs are created in ARMED state */
        eq_update_ci(eq, 1);

        mlx5_vfree(in);
        return 0;

err_irq:
        free_irq(table->msix_arr[vecidx].vector, eq);

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        mlx5_vfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

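/* Tear down an EQ created by mlx5_create_map_eq(): unregister it from
 * debugfs, free its interrupt, destroy it in firmware and release the
 * EQE buffer.
 */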
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

        mlx5_debug_eq_remove(dev, eq);
        free_irq(table->msix_arr[eq->irqn].vector, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        mlx5_buf_free(dev, &eq->buf);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
        int err;

        spin_lock_init(&dev->priv.eq_table.lock);

        err = mlx5_eq_debugfs_init(dev);

        return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_eq_debugfs_cleanup(dev);
}

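/* Bring up the driver's EQs in dependency order: the command EQ first
 * (so the command interface can switch from polling to event-driven
 * completions), then the async event EQ and the page-request EQ.
 */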
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
                                 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
                                 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                return err;
        }

        mlx5_cmd_use_events(dev);

        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
                                 MLX5_NUM_ASYNC_EQE, MLX5_ASYNC_EVENT_MASK,
                                 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
        }

        err = mlx5_create_map_eq(dev, &table->pages_eq,
                                 MLX5_EQ_VEC_PAGES,
                                 dev->caps.max_vf + 1,
                                 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
                                 &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }

        return err;

err2:
        mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
        mlx5_cmd_use_polling(dev);
        mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        return err;
}

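/* Destroy the EQs in reverse order of creation.  The command interface
 * is switched back to polling before its EQ goes away; if destroying
 * the command EQ fails, event mode is restored.
 */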
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
                return err;

        mlx5_destroy_unmap_eq(dev, &table->async_eq);
        mlx5_cmd_use_polling(dev);

        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
                mlx5_cmd_use_events(dev);

        return err;
}

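/* Query an EQ's context from firmware via the QUERY_EQ command. */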
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       struct mlx5_query_eq_mbox_out *out, int outlen)
{
        struct mlx5_query_eq_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
        in.eqn = eq->eqn;
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                err = mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);