linux/drivers/net/ethernet/mellanox/mlx5/core/eq.c
   1/*
   2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and/or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32
  33#include <linux/interrupt.h>
  34#include <linux/module.h>
  35#include <linux/mlx5/driver.h>
  36#include <linux/mlx5/cmd.h>
  37#ifdef CONFIG_RFS_ACCEL
  38#include <linux/cpu_rmap.h>
  39#endif
  40#include "mlx5_core.h"
  41#include "fpga/core.h"
  42#include "eswitch.h"
  43#include "lib/clock.h"
  44#include "diag/fw_tracer.h"
  45
  46enum {
  47        MLX5_EQE_SIZE           = sizeof(struct mlx5_eqe),
  48        MLX5_EQE_OWNER_INIT_VAL = 0x1,
  49};
  50
  51enum {
  52        MLX5_EQ_STATE_ARMED             = 0x9,
  53        MLX5_EQ_STATE_FIRED             = 0xa,
  54        MLX5_EQ_STATE_ALWAYS_ARMED      = 0xb,
  55};
  56
  57enum {
  58        MLX5_NUM_SPARE_EQE      = 0x80,
  59        MLX5_NUM_ASYNC_EQE      = 0x1000,
  60        MLX5_NUM_CMD_EQE        = 32,
  61        MLX5_NUM_PF_DRAIN       = 64,
  62};
  63
  64enum {
  65        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
  66};
  67
  68#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
  69                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
  70                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
  71                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
  72                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
  73                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
  74                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
  75                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
  76                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
  77                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
  78                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
  79                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
  80
  81struct map_eq_in {
  82        u64     mask;
  83        u32     reserved;
  84        u32     unmap_eqn;
  85};
  86
  87struct cre_des_eq {
  88        u8      reserved[15];
  89        u8      eqn;
  90};
  91
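/* Issue the DESTROY_EQ firmware command for the given EQ number. */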
  92static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
  93{
  94        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
  95        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};
  96
  97        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
  98        MLX5_SET(destroy_eq_in, in, eq_number, eqn);
  99        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 100}
 101
 102static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
 103{
 104        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
 105}
 106
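/* Return the EQE at the current consumer index if its ownership bit shows
 * it has been handed over to software, or NULL if the queue is empty.
 */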
 107static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
 108{
 109        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
 110
 111        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
 112}
 113
 114static const char *eqe_type_str(u8 type)
 115{
 116        switch (type) {
 117        case MLX5_EVENT_TYPE_COMP:
 118                return "MLX5_EVENT_TYPE_COMP";
 119        case MLX5_EVENT_TYPE_PATH_MIG:
 120                return "MLX5_EVENT_TYPE_PATH_MIG";
 121        case MLX5_EVENT_TYPE_COMM_EST:
 122                return "MLX5_EVENT_TYPE_COMM_EST";
 123        case MLX5_EVENT_TYPE_SQ_DRAINED:
 124                return "MLX5_EVENT_TYPE_SQ_DRAINED";
 125        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
 126                return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
 127        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
 128                return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
 129        case MLX5_EVENT_TYPE_CQ_ERROR:
 130                return "MLX5_EVENT_TYPE_CQ_ERROR";
 131        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
 132                return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
 133        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
 134                return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
 135        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 136                return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
 137        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
 138                return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
 139        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
 140                return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
 141        case MLX5_EVENT_TYPE_INTERNAL_ERROR:
 142                return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
 143        case MLX5_EVENT_TYPE_PORT_CHANGE:
 144                return "MLX5_EVENT_TYPE_PORT_CHANGE";
 145        case MLX5_EVENT_TYPE_GPIO_EVENT:
 146                return "MLX5_EVENT_TYPE_GPIO_EVENT";
 147        case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
 148                return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
 149        case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
 150                return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
 151        case MLX5_EVENT_TYPE_REMOTE_CONFIG:
 152                return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
 153        case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
 154                return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
 155        case MLX5_EVENT_TYPE_STALL_EVENT:
 156                return "MLX5_EVENT_TYPE_STALL_EVENT";
 157        case MLX5_EVENT_TYPE_CMD:
 158                return "MLX5_EVENT_TYPE_CMD";
 159        case MLX5_EVENT_TYPE_PAGE_REQUEST:
 160                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
 161        case MLX5_EVENT_TYPE_PAGE_FAULT:
 162                return "MLX5_EVENT_TYPE_PAGE_FAULT";
 163        case MLX5_EVENT_TYPE_PPS_EVENT:
 164                return "MLX5_EVENT_TYPE_PPS_EVENT";
 165        case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
 166                return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
 167        case MLX5_EVENT_TYPE_FPGA_ERROR:
 168                return "MLX5_EVENT_TYPE_FPGA_ERROR";
 169        case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
 170                return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
 171        case MLX5_EVENT_TYPE_GENERAL_EVENT:
 172                return "MLX5_EVENT_TYPE_GENERAL_EVENT";
 173        case MLX5_EVENT_TYPE_DEVICE_TRACER:
 174                return "MLX5_EVENT_TYPE_DEVICE_TRACER";
 175        default:
 176                return "Unrecognized event";
 177        }
 178}
 179
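/* Translate a PORT_CHANGE EQE subtype into the corresponding mlx5_dev_event;
 * returns -1 for unrecognized subtypes.
 */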
 180static enum mlx5_dev_event port_subtype_event(u8 subtype)
 181{
 182        switch (subtype) {
 183        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
 184                return MLX5_DEV_EVENT_PORT_DOWN;
 185        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
 186                return MLX5_DEV_EVENT_PORT_UP;
 187        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
 188                return MLX5_DEV_EVENT_PORT_INITIALIZED;
 189        case MLX5_PORT_CHANGE_SUBTYPE_LID:
 190                return MLX5_DEV_EVENT_LID_CHANGE;
 191        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
 192                return MLX5_DEV_EVENT_PKEY_CHANGE;
 193        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
 194                return MLX5_DEV_EVENT_GUID_CHANGE;
 195        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
 196                return MLX5_DEV_EVENT_CLIENT_REREG;
 197        }
 198        return -1;
 199}
 200
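/* Post the consumer index to the EQ doorbell; with @arm set the EQ is also
 * re-armed so the device may generate the next interrupt.
 */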
 201static void eq_update_ci(struct mlx5_eq *eq, int arm)
 202{
 203        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
 204        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
 205
 206        __raw_writel((__force u32)cpu_to_be32(val), addr);
 207        /* We still want ordering, just not swabbing, so add a barrier */
 208        mb();
 209}
 210
 211#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
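/* Work handler: deliver a single queued page fault to the core page fault
 * handler and return the descriptor to the mempool.
 */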
 212static void eqe_pf_action(struct work_struct *work)
 213{
 214        struct mlx5_pagefault *pfault = container_of(work,
 215                                                     struct mlx5_pagefault,
 216                                                     work);
 217        struct mlx5_eq *eq = pfault->eq;
 218
 219        mlx5_core_page_fault(eq->dev, pfault);
 220        mempool_free(pfault, eq->pf_ctx.pool);
 221}
 222
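/* Drain the page-fault EQ: decode RDMA and WQE fault EQEs into struct
 * mlx5_pagefault and queue each one on the ordered workqueue for resolution.
 */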
 223static void eq_pf_process(struct mlx5_eq *eq)
 224{
 225        struct mlx5_core_dev *dev = eq->dev;
 226        struct mlx5_eqe_page_fault *pf_eqe;
 227        struct mlx5_pagefault *pfault;
 228        struct mlx5_eqe *eqe;
 229        int set_ci = 0;
 230
 231        while ((eqe = next_eqe_sw(eq))) {
 232                pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
 233                if (!pfault) {
 234                        schedule_work(&eq->pf_ctx.work);
 235                        break;
 236                }
 237
 238                dma_rmb();
 239                pf_eqe = &eqe->data.page_fault;
 240                pfault->event_subtype = eqe->sub_type;
 241                pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
 242
 243                mlx5_core_dbg(dev,
 244                              "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
 245                              eqe->sub_type, pfault->bytes_committed);
 246
 247                switch (eqe->sub_type) {
 248                case MLX5_PFAULT_SUBTYPE_RDMA:
 249                        /* RDMA based event */
 250                        pfault->type =
 251                                be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
 252                        pfault->token =
 253                                be32_to_cpu(pf_eqe->rdma.pftype_token) &
 254                                MLX5_24BIT_MASK;
 255                        pfault->rdma.r_key =
 256                                be32_to_cpu(pf_eqe->rdma.r_key);
 257                        pfault->rdma.packet_size =
 258                                be16_to_cpu(pf_eqe->rdma.packet_length);
 259                        pfault->rdma.rdma_op_len =
 260                                be32_to_cpu(pf_eqe->rdma.rdma_op_len);
 261                        pfault->rdma.rdma_va =
 262                                be64_to_cpu(pf_eqe->rdma.rdma_va);
 263                        mlx5_core_dbg(dev,
 264                                      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
 265                                      pfault->type, pfault->token,
 266                                      pfault->rdma.r_key);
 267                        mlx5_core_dbg(dev,
 268                                      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
 269                                      pfault->rdma.rdma_op_len,
 270                                      pfault->rdma.rdma_va);
 271                        break;
 272
 273                case MLX5_PFAULT_SUBTYPE_WQE:
 274                        /* WQE based event */
 275                        pfault->type =
 276                                (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
 277                        pfault->token =
 278                                be32_to_cpu(pf_eqe->wqe.token);
 279                        pfault->wqe.wq_num =
 280                                be32_to_cpu(pf_eqe->wqe.pftype_wq) &
 281                                MLX5_24BIT_MASK;
 282                        pfault->wqe.wqe_index =
 283                                be16_to_cpu(pf_eqe->wqe.wqe_index);
 284                        pfault->wqe.packet_size =
 285                                be16_to_cpu(pf_eqe->wqe.packet_length);
 286                        mlx5_core_dbg(dev,
 287                                      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
 288                                      pfault->type, pfault->token,
 289                                      pfault->wqe.wq_num,
 290                                      pfault->wqe.wqe_index);
 291                        break;
 292
 293                default:
 294                        mlx5_core_warn(dev,
 295                                       "Unsupported page fault event sub-type: 0x%02hhx\n",
 296                                       eqe->sub_type);
 297                        /* Unsupported page faults should still be
 298                         * resolved by the page fault handler
 299                         */
 300                }
 301
 302                pfault->eq = eq;
 303                INIT_WORK(&pfault->work, eqe_pf_action);
 304                queue_work(eq->pf_ctx.wq, &pfault->work);
 305
 306                ++eq->cons_index;
 307                ++set_ci;
 308
 309                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
 310                        eq_update_ci(eq, 0);
 311                        set_ci = 0;
 312                }
 313        }
 314
 315        eq_update_ci(eq, 1);
 316}
 317
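/* Interrupt handler for the page-fault EQ: process the queue inline when the
 * lock is free, otherwise defer to eq_pf_action() via schedule_work().
 */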
 318static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
 319{
 320        struct mlx5_eq *eq = eq_ptr;
 321        unsigned long flags;
 322
 323        if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
 324                eq_pf_process(eq);
 325                spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
 326        } else {
 327                schedule_work(&eq->pf_ctx.work);
 328        }
 329
 330        return IRQ_HANDLED;
 331}
 332
 333/* mempool_refill() was proposed but unfortunately wasn't accepted
 334 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
  335 * Cheap workaround.
 336 */
 337static void mempool_refill(mempool_t *pool)
 338{
 339        while (pool->curr_nr < pool->min_nr)
 340                mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
 341}
 342
 343static void eq_pf_action(struct work_struct *work)
 344{
 345        struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);
 346
 347        mempool_refill(eq->pf_ctx.pool);
 348
 349        spin_lock_irq(&eq->pf_ctx.lock);
 350        eq_pf_process(eq);
 351        spin_unlock_irq(&eq->pf_ctx.lock);
 352}
 353
 354static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
 355{
 356        spin_lock_init(&pf_ctx->lock);
 357        INIT_WORK(&pf_ctx->work, eq_pf_action);
 358
 359        pf_ctx->wq = alloc_ordered_workqueue(name,
 360                                             WQ_MEM_RECLAIM);
 361        if (!pf_ctx->wq)
 362                return -ENOMEM;
 363
 364        pf_ctx->pool = mempool_create_kmalloc_pool
 365                (MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
 366        if (!pf_ctx->pool)
 367                goto err_wq;
 368
 369        return 0;
 370err_wq:
 371        destroy_workqueue(pf_ctx->wq);
 372        return -ENOMEM;
 373}
 374
 375int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
 376                                u32 wq_num, u8 type, int error)
 377{
 378        u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
 379        u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};
 380
 381        MLX5_SET(page_fault_resume_in, in, opcode,
 382                 MLX5_CMD_OP_PAGE_FAULT_RESUME);
 383        MLX5_SET(page_fault_resume_in, in, error, !!error);
 384        MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
 385        MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
 386        MLX5_SET(page_fault_resume_in, in, token, token);
 387
 388        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 389}
 390EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
 391#endif
 392
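/* Dispatch GENERAL_EVENT EQEs; only the delay-drop timeout subtype is
 * forwarded to the registered device event callback, others are logged
 * at debug level.
 */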
 393static void general_event_handler(struct mlx5_core_dev *dev,
 394                                  struct mlx5_eqe *eqe)
 395{
 396        switch (eqe->sub_type) {
 397        case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
 398                if (dev->event)
 399                        dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
 400                break;
 401        default:
 402                mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
 403                              eqe->sub_type);
 404        }
 405}
 406
 407static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
 408                                    struct mlx5_eqe *eqe)
 409{
 410        u64 value_lsb;
 411        u64 value_msb;
 412
 413        value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
 414        value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
 415
 416        mlx5_core_warn(dev,
  417                       "High temperature on sensors with bit set %llx %llx\n",
 418                       value_msb, value_lsb);
 419}
 420
 421/* caller must eventually call mlx5_cq_put on the returned cq */
 422static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
 423{
 424        struct mlx5_cq_table *table = &eq->cq_table;
 425        struct mlx5_core_cq *cq = NULL;
 426
 427        spin_lock(&table->lock);
 428        cq = radix_tree_lookup(&table->tree, cqn);
 429        if (likely(cq))
 430                mlx5_cq_hold(cq);
 431        spin_unlock(&table->lock);
 432
 433        return cq;
 434}
 435
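/* Deliver a completion event to the CQ looked up by CQN: bump its arm
 * sequence number and invoke its completion handler.
 */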
 436static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
 437{
 438        struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
 439
 440        if (unlikely(!cq)) {
 441                mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
 442                return;
 443        }
 444
 445        ++cq->arm_sn;
 446
 447        cq->comp(cq);
 448
 449        mlx5_cq_put(cq);
 450}
 451
 452static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
 453{
 454        struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
 455
 456        if (unlikely(!cq)) {
 457                mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
 458                return;
 459        }
 460
 461        cq->event(cq, event_type);
 462
 463        mlx5_cq_put(cq);
 464}
 465
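/* Main EQ interrupt handler: poll owned EQEs, dispatch completion and
 * asynchronous events, update the consumer index, re-arm the EQ, and
 * schedule the CQ tasklet if any completion event was seen.
 */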
 466static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 467{
 468        struct mlx5_eq *eq = eq_ptr;
 469        struct mlx5_core_dev *dev = eq->dev;
 470        struct mlx5_eqe *eqe;
 471        int set_ci = 0;
 472        u32 cqn = -1;
 473        u32 rsn;
 474        u8 port;
 475
 476        while ((eqe = next_eqe_sw(eq))) {
 477                /*
 478                 * Make sure we read EQ entry contents after we've
 479                 * checked the ownership bit.
 480                 */
 481                dma_rmb();
 482
 483                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
 484                              eq->eqn, eqe_type_str(eqe->type));
 485                switch (eqe->type) {
 486                case MLX5_EVENT_TYPE_COMP:
 487                        cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
 488                        mlx5_eq_cq_completion(eq, cqn);
 489                        break;
 490                case MLX5_EVENT_TYPE_DCT_DRAINED:
 491                        rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
 492                        rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
 493                        mlx5_rsc_event(dev, rsn, eqe->type);
 494                        break;
 495                case MLX5_EVENT_TYPE_PATH_MIG:
 496                case MLX5_EVENT_TYPE_COMM_EST:
 497                case MLX5_EVENT_TYPE_SQ_DRAINED:
 498                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
 499                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
 500                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
 501                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 502                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
 503                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
 504                        rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
 505                        mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
 506                                      eqe_type_str(eqe->type), eqe->type, rsn);
 507                        mlx5_rsc_event(dev, rsn, eqe->type);
 508                        break;
 509
 510                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
 511                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
 512                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
 513                        mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
 514                                      eqe_type_str(eqe->type), eqe->type, rsn);
 515                        mlx5_srq_event(dev, rsn, eqe->type);
 516                        break;
 517
 518                case MLX5_EVENT_TYPE_CMD:
 519                        mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
 520                        break;
 521
 522                case MLX5_EVENT_TYPE_PORT_CHANGE:
 523                        port = (eqe->data.port.port >> 4) & 0xf;
 524                        switch (eqe->sub_type) {
 525                        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
 526                        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
 527                        case MLX5_PORT_CHANGE_SUBTYPE_LID:
 528                        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
 529                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
 530                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
 531                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
 532                                if (dev->event)
 533                                        dev->event(dev, port_subtype_event(eqe->sub_type),
 534                                                   (unsigned long)port);
 535                                break;
 536                        default:
 537                                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
 538                                               port, eqe->sub_type);
 539                        }
 540                        break;
 541                case MLX5_EVENT_TYPE_CQ_ERROR:
 542                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
 543                        mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
 544                                       cqn, eqe->data.cq_err.syndrome);
 545                        mlx5_eq_cq_event(eq, cqn, eqe->type);
 546                        break;
 547
 548                case MLX5_EVENT_TYPE_PAGE_REQUEST:
 549                        {
 550                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
 551                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
 552
 553                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
 554                                              func_id, npages);
 555                                mlx5_core_req_pages_handler(dev, func_id, npages);
 556                        }
 557                        break;
 558
 559                case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
 560                        mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
 561                        break;
 562
 563                case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
 564                        mlx5_port_module_event(dev, eqe);
 565                        break;
 566
 567                case MLX5_EVENT_TYPE_PPS_EVENT:
 568                        mlx5_pps_event(dev, eqe);
 569                        break;
 570
 571                case MLX5_EVENT_TYPE_FPGA_ERROR:
 572                case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
 573                        mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
 574                        break;
 575
 576                case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
 577                        mlx5_temp_warning_event(dev, eqe);
 578                        break;
 579
 580                case MLX5_EVENT_TYPE_GENERAL_EVENT:
 581                        general_event_handler(dev, eqe);
 582                        break;
 583
 584                case MLX5_EVENT_TYPE_DEVICE_TRACER:
 585                        mlx5_fw_tracer_event(dev, eqe);
 586                        break;
 587
 588                default:
 589                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
 590                                       eqe->type, eq->eqn);
 591                        break;
 592                }
 593
 594                ++eq->cons_index;
 595                ++set_ci;
 596
 597                /* The HCA will think the queue has overflowed if we
 598                 * don't tell it we've been processing events.  We
 599                 * create our EQs with MLX5_NUM_SPARE_EQE extra
 600                 * entries, so we must update our consumer index at
 601                 * least that often.
 602                 */
 603                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
 604                        eq_update_ci(eq, 0);
 605                        set_ci = 0;
 606                }
 607        }
 608
 609        eq_update_ci(eq, 1);
 610
 611        if (cqn != -1)
 612                tasklet_schedule(&eq->tasklet_ctx.task);
 613
 614        return IRQ_HANDLED;
 615}
 616
 617/* Some architectures don't latch interrupts when they are disabled, so using
 618 * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
 619 * avoid losing them.  It is not recommended to use it, unless this is the last
 620 * resort.
 621 */
 622u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
 623{
 624        u32 count_eqe;
 625
 626        disable_irq(eq->irqn);
 627        count_eqe = eq->cons_index;
 628        mlx5_eq_int(eq->irqn, eq);
 629        count_eqe = eq->cons_index - count_eqe;
 630        enable_irq(eq->irqn);
 631
 632        return count_eqe;
 633}
 634
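/* Initialize the owner bit of every EQE to the hardware-ownership value so
 * the first pass over the queue is detected correctly.
 */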
 635static void init_eq_buf(struct mlx5_eq *eq)
 636{
 637        struct mlx5_eqe *eqe;
 638        int i;
 639
 640        for (i = 0; i < eq->nent; i++) {
 641                eqe = get_eqe(eq, i);
 642                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
 643        }
 644}
 645
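/* Allocate the EQ buffer, create the EQ in firmware with the requested event
 * mask, request its IRQ and register it with debugfs. Regular EQs get a CQ
 * tasklet context; page-fault EQs get a mempool-backed workqueue context.
 */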
 646int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 647                       int nent, u64 mask, const char *name,
 648                       enum mlx5_eq_type type)
 649{
 650        struct mlx5_cq_table *cq_table = &eq->cq_table;
 651        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
 652        struct mlx5_priv *priv = &dev->priv;
 653        irq_handler_t handler;
 654        __be64 *pas;
 655        void *eqc;
 656        int inlen;
 657        u32 *in;
 658        int err;
 659
 660        /* Init CQ table */
 661        memset(cq_table, 0, sizeof(*cq_table));
 662        spin_lock_init(&cq_table->lock);
 663        INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
 664
 665        eq->type = type;
 666        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
 667        eq->cons_index = 0;
 668        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
 669        if (err)
 670                return err;
 671
 672#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 673        if (type == MLX5_EQ_TYPE_PF)
 674                handler = mlx5_eq_pf_int;
 675        else
 676#endif
 677                handler = mlx5_eq_int;
 678
 679        init_eq_buf(eq);
 680
 681        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
 682                MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
 683
 684        in = kvzalloc(inlen, GFP_KERNEL);
 685        if (!in) {
 686                err = -ENOMEM;
 687                goto err_buf;
 688        }
 689
 690        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
 691        mlx5_fill_page_array(&eq->buf, pas);
 692
 693        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
 694        MLX5_SET64(create_eq_in, in, event_bitmask, mask);
 695
 696        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
 697        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
 698        MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
 699        MLX5_SET(eqc, eqc, intr, vecidx);
 700        MLX5_SET(eqc, eqc, log_page_size,
 701                 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 702
 703        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 704        if (err)
 705                goto err_in;
 706
 707        snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
 708                 name, pci_name(dev->pdev));
 709
 710        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
 711        eq->irqn = pci_irq_vector(dev->pdev, vecidx);
 712        eq->dev = dev;
 713        eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
 714        err = request_irq(eq->irqn, handler, 0,
 715                          priv->irq_info[vecidx].name, eq);
 716        if (err)
 717                goto err_eq;
 718
 719        err = mlx5_debug_eq_add(dev, eq);
 720        if (err)
 721                goto err_irq;
 722
 723#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 724        if (type == MLX5_EQ_TYPE_PF) {
 725                err = init_pf_ctx(&eq->pf_ctx, name);
 726                if (err)
 727                        goto err_irq;
 728        } else
 729#endif
 730        {
 731                INIT_LIST_HEAD(&eq->tasklet_ctx.list);
 732                INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
 733                spin_lock_init(&eq->tasklet_ctx.lock);
 734                tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
 735                             (unsigned long)&eq->tasklet_ctx);
 736        }
 737
 738        /* EQs are created in ARMED state
 739         */
 740        eq_update_ci(eq, 1);
 741
 742        kvfree(in);
 743        return 0;
 744
 745err_irq:
 746        free_irq(eq->irqn, eq);
 747
 748err_eq:
 749        mlx5_cmd_destroy_eq(dev, eq->eqn);
 750
 751err_in:
 752        kvfree(in);
 753
 754err_buf:
 755        mlx5_buf_free(dev, &eq->buf);
 756        return err;
 757}
 758
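/* Tear down an EQ: remove it from debugfs, free its IRQ, destroy it in
 * firmware, quiesce the completion tasklet or page-fault workqueue, and
 * free the EQ buffer.
 */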
 759int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 760{
 761        int err;
 762
 763        mlx5_debug_eq_remove(dev, eq);
 764        free_irq(eq->irqn, eq);
 765        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
 766        if (err)
 767                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
 768                               eq->eqn);
 769        synchronize_irq(eq->irqn);
 770
 771        if (eq->type == MLX5_EQ_TYPE_COMP) {
 772                tasklet_disable(&eq->tasklet_ctx.task);
 773#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 774        } else if (eq->type == MLX5_EQ_TYPE_PF) {
 775                cancel_work_sync(&eq->pf_ctx.work);
 776                destroy_workqueue(eq->pf_ctx.wq);
 777                mempool_destroy(eq->pf_ctx.pool);
 778#endif
 779        }
 780        mlx5_buf_free(dev, &eq->buf);
 781
 782        return err;
 783}
 784
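/* Register a CQ in the EQ's radix tree so completion and asynchronous
 * events can be routed to it by CQN.
 */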
 785int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
 786{
 787        struct mlx5_cq_table *table = &eq->cq_table;
 788        int err;
 789
 790        spin_lock_irq(&table->lock);
 791        err = radix_tree_insert(&table->tree, cq->cqn, cq);
 792        spin_unlock_irq(&table->lock);
 793
 794        return err;
 795}
 796
 797int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
 798{
 799        struct mlx5_cq_table *table = &eq->cq_table;
 800        struct mlx5_core_cq *tmp;
 801
 802        spin_lock_irq(&table->lock);
 803        tmp = radix_tree_delete(&table->tree, cq->cqn);
 804        spin_unlock_irq(&table->lock);
 805
 806        if (!tmp) {
  807                mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", cq->cqn, eq->eqn);
 808                return -ENOENT;
 809        }
 810
 811        if (tmp != cq) {
  812                mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", cq->cqn, eq->eqn);
 813                return -EINVAL;
 814        }
 815
 816        return 0;
 817}
 818
 819int mlx5_eq_init(struct mlx5_core_dev *dev)
 820{
 821        int err;
 822
 823        spin_lock_init(&dev->priv.eq_table.lock);
 824
 825        err = mlx5_eq_debugfs_init(dev);
 826
 827        return err;
 828}
 829
 830void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
 831{
 832        mlx5_eq_debugfs_cleanup(dev);
 833}
 834
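/* Create the command, async and pages EQs (plus the page-fault EQ when
 * on-demand paging is supported), building the async event mask from the
 * device capabilities and switching the command interface to event mode
 * once the command EQ exists.
 */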
 835int mlx5_start_eqs(struct mlx5_core_dev *dev)
 836{
 837        struct mlx5_eq_table *table = &dev->priv.eq_table;
 838        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
 839        int err;
 840
 841        if (MLX5_VPORT_MANAGER(dev))
 842                async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
 843
 844        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
 845            MLX5_CAP_GEN(dev, general_notification_event))
 846                async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);
 847
 848        if (MLX5_CAP_GEN(dev, port_module_event))
 849                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
 850        else
 851                mlx5_core_dbg(dev, "port_module_event is not set\n");
 852
 853        if (MLX5_PPS_CAP(dev))
 854                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
 855
 856        if (MLX5_CAP_GEN(dev, fpga))
 857                async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
 858                                    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);
 859        if (MLX5_CAP_GEN_MAX(dev, dct))
 860                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED);
 861
 862        if (MLX5_CAP_GEN(dev, temp_warn_event))
 863                async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);
 864
 865        if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
 866                async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);
 867
 868        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
 869                                 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
 870                                 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
 871        if (err) {
 872                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
 873                return err;
 874        }
 875
 876        mlx5_cmd_use_events(dev);
 877
 878        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
 879                                 MLX5_NUM_ASYNC_EQE, async_event_mask,
 880                                 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
 881        if (err) {
 882                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
 883                goto err1;
 884        }
 885
 886        err = mlx5_create_map_eq(dev, &table->pages_eq,
 887                                 MLX5_EQ_VEC_PAGES,
 888                                 /* TODO: sriov max_vf + */ 1,
 889                                 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
 890                                 MLX5_EQ_TYPE_ASYNC);
 891        if (err) {
 892                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
 893                goto err2;
 894        }
 895
 896#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 897        if (MLX5_CAP_GEN(dev, pg)) {
 898                err = mlx5_create_map_eq(dev, &table->pfault_eq,
 899                                         MLX5_EQ_VEC_PFAULT,
 900                                         MLX5_NUM_ASYNC_EQE,
 901                                         1 << MLX5_EVENT_TYPE_PAGE_FAULT,
 902                                         "mlx5_page_fault_eq",
 903                                         MLX5_EQ_TYPE_PF);
 904                if (err) {
 905                        mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
 906                                       err);
 907                        goto err3;
 908                }
 909        }
 910
 911        return err;
 912err3:
 913        mlx5_destroy_unmap_eq(dev, &table->pages_eq);
 914#else
 915        return err;
 916#endif
 917
 918err2:
 919        mlx5_destroy_unmap_eq(dev, &table->async_eq);
 920
 921err1:
 922        mlx5_cmd_use_polling(dev);
 923        mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
 924        return err;
 925}
 926
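/* Destroy the EQs created by mlx5_start_eqs(), switching the command
 * interface back to polling mode before the command EQ goes away.
 */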
 927void mlx5_stop_eqs(struct mlx5_core_dev *dev)
 928{
 929        struct mlx5_eq_table *table = &dev->priv.eq_table;
 930        int err;
 931
 932#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 933        if (MLX5_CAP_GEN(dev, pg)) {
 934                err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
 935                if (err)
 936                        mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
 937                                      err);
 938        }
 939#endif
 940
 941        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
 942        if (err)
 943                mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
 944                              err);
 945
 946        err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
 947        if (err)
 948                mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
 949                              err);
 950        mlx5_cmd_use_polling(dev);
 951
 952        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
 953        if (err)
 954                mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
 955                              err);
 956}
 957
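/* Query the EQ context from firmware with the QUERY_EQ command. */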
 958int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 959                       u32 *out, int outlen)
 960{
 961        u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
 962
 963        MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
 964        MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
 965        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 966}
 967
 968/* This function should only be called after mlx5_cmd_force_teardown_hca */
 969void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
 970{
 971        struct mlx5_eq_table *table = &dev->priv.eq_table;
 972        struct mlx5_eq *eq;
 973
 974#ifdef CONFIG_RFS_ACCEL
 975        if (dev->rmap) {
 976                free_irq_cpu_rmap(dev->rmap);
 977                dev->rmap = NULL;
 978        }
 979#endif
 980        list_for_each_entry(eq, &table->comp_eqs_list, list)
 981                free_irq(eq->irqn, eq);
 982
 983        free_irq(table->pages_eq.irqn, &table->pages_eq);
 984        free_irq(table->async_eq.irqn, &table->async_eq);
 985        free_irq(table->cmd_eq.irqn, &table->cmd_eq);
 986#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 987        if (MLX5_CAP_GEN(dev, pg))
 988                free_irq(table->pfault_eq.irqn, &table->pfault_eq);
 989#endif
 990        pci_free_irq_vectors(dev->pdev);
 991}
 992