linux/drivers/net/mlx4/eq.c
/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
        MLX4_IRQNAME_SIZE       = 64
};

enum {
        MLX4_NUM_ASYNC_EQE      = 0x100,
        MLX4_NUM_SPARE_EQE      = 0x80,
        MLX4_EQ_ENTRY_SIZE      = 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
        __be32                  flags;
        u16                     reserved1[3];
        __be16                  page_offset;
        u8                      log_eq_size;
        u8                      reserved2[4];
        u8                      eq_period;
        u8                      reserved3;
        u8                      eq_max_count;
        u8                      reserved4[3];
        u8                      intr;
        u8                      log_page_size;
        u8                      reserved5[2];
        u8                      mtt_base_addr_h;
        __be32                  mtt_base_addr_l;
        u32                     reserved6[2];
        __be32                  consumer_index;
        __be32                  producer_index;
        u32                     reserved7[4];
};

#define MLX4_EQ_STATUS_OK          ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW           ( 0 << 24)
#define MLX4_EQ_OWNER_HW           ( 1 << 24)
#define MLX4_EQ_FLAG_EC            ( 1 << 18)
#define MLX4_EQ_FLAG_OI            ( 1 << 17)
#define MLX4_EQ_STATE_ARMED        ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED        (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX4_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX4_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX4_EVENT_TYPE_ECC_DETECT)         | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
                               (1ull << MLX4_EVENT_TYPE_CMD))

struct mlx4_eqe {
        u8                      reserved1;
        u8                      type;
        u8                      reserved2;
        u8                      subtype;
        union {
                u32             raw[6];
                struct {
                        __be32  cqn;
                } __attribute__((packed)) comp;
                struct {
                        u16     reserved1;
                        __be16  token;
                        u32     reserved2;
                        u8      reserved3[3];
                        u8      status;
                        __be64  out_param;
                } __attribute__((packed)) cmd;
                struct {
                        __be32  qpn;
                } __attribute__((packed)) qp;
                struct {
                        __be32  srqn;
                } __attribute__((packed)) srq;
                struct {
                        __be32  cqn;
                        u32     reserved1;
                        u8      reserved2[3];
                        u8      syndrome;
                } __attribute__((packed)) cq_err;
                struct {
                        u32     reserved1[2];
                        __be32  port;
                } __attribute__((packed)) port_change;
        }                       event;
        u8                      reserved3[3];
        u8                      owner;
} __attribute__((packed));

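/*
 * Ring the EQ consumer index doorbell: write the low 24 bits of the
 * consumer index, with bit 31 set when we want the HCA to generate
 * another interrupt for new events, to acknowledge the entries we
 * have already polled.
 */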
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
        __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
                                               req_not << 31),
                     eq->doorbell);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

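/*
 * The EQ buffer is a list of pages; each entry is MLX4_EQ_ENTRY_SIZE
 * bytes, so locate the page and the offset within it for a given
 * (wrapped) entry index.
 */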
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

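/*
 * The HCA toggles the ownership bit on each pass through the EQ, so an
 * entry belongs to software only when its owner bit matches the parity
 * of the consumer index wrap count (cons_index & nent); otherwise there
 * is nothing new to process and we return NULL.
 */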
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

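/*
 * Poll an EQ: consume every software-owned entry, dispatching each one
 * to the CQ, QP, SRQ, command or port-change handler according to its
 * event type, and ring the consumer index doorbell when done (and every
 * MLX4_NUM_SPARE_EQE entries along the way so the HCA never sees the
 * queue as full).
 */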
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe;
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;
        int port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MLX4_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        mlx4_cq_completion(dev, cqn);
                        break;

                case MLX4_EVENT_TYPE_PATH_MIG:
                case MLX4_EVENT_TYPE_COMM_EST:
                case MLX4_EVENT_TYPE_SQ_DRAINED:
                case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
                case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
                                       eqe->type);
                        break;

                case MLX4_EVENT_TYPE_CMD:
                        mlx4_cmd_event(dev,
                                       be16_to_cpu(eqe->event.cmd.token),
                                       eqe->event.cmd.status,
                                       be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
                                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
                        } else {
                                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 0;
                        }
                        break;

                case MLX4_EVENT_TYPE_CQ_ERROR:
                        mlx4_warn(dev, "CQ %s on CQN %06x\n",
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_EQ_OVERFLOW:
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
                        mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
                                  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX4_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
                        eq_set_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_set_ci(eq, 1);

        return eqes_found;
}

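/*
 * INTx (pin-based) interrupt handler: clear the interrupt by writing
 * the clear mask, then poll every EQ, since the single line is shared
 * by all of them.
 */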
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
        struct mlx4_dev *dev = dev_ptr;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int work = 0;
        int i;

        writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

        return IRQ_RETVAL(work);
}

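/*
 * MSI-X interrupt handler: each vector is bound to exactly one EQ, so
 * there is nothing to demultiplex.
 */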
static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mlx4_eq  *eq  = eq_ptr;
        struct mlx4_dev *dev = eq->dev;

        mlx4_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

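/*
 * Thin wrappers around the MAP_EQ, SW2HW_EQ and HW2SW_EQ firmware
 * commands, used to (un)subscribe an EQ to async event types and to
 * hand EQ ownership between software and hardware.
 */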
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                        int eq_num)
{
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
                        0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
                        MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
                            MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
        /*
         * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
         * we need to map, take the difference of highest index and
         * the lowest index we'll use and add 1.
         */
        return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
                dev->caps.reserved_eqs / 4 + 1;
}

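/*
 * Return the doorbell address for an EQ, lazily ioremap()ing the UAR
 * page (in BAR 2) that contains it.  Each UAR page holds the doorbells
 * of four consecutive EQs.
 */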
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int index;

        index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

        if (!priv->eq_table.uar_map[index]) {
                priv->eq_table.uar_map[index] =
                        ioremap(pci_resource_start(dev->pdev, 2) +
                                ((eq->eqn / 4) << PAGE_SHIFT),
                                PAGE_SIZE);
                if (!priv->eq_table.uar_map[index]) {
                        mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
                                 eq->eqn);
                        return NULL;
                }
        }

        return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

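/*
 * Create one EQ: allocate coherent pages for the queue, an EQ number
 * from the bitmap and a doorbell, describe the pages with an MTT, and
 * then pass ownership to the hardware with SW2HW_EQ.
 */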
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
                          u8 intr, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_eq_context *eq_context;
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        u64 mtt_addr;
        int err = -ENOMEM;
        int i;

        eq->dev   = dev;
        eq->nent  = roundup_pow_of_two(max(nent, 2));
        npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                eq->page_list[i].map = t;

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }

        eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        eq->doorbell = mlx4_get_eq_uar(dev, eq);
        if (!eq->doorbell) {
                err = -ENOMEM;
                goto err_out_free_eq;
        }

        err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
        if (err)
                goto err_out_free_eq;

        err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
        if (err)
                goto err_out_free_mtt;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags         = cpu_to_be32(MLX4_EQ_STATUS_OK   |
                                                MLX4_EQ_STATE_ARMED);
        eq_context->log_eq_size   = ilog2(eq->nent);
        eq_context->intr          = intr;
        eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

        mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
        eq_context->mtt_base_addr_h = mtt_addr >> 32;
        eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

        err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
        if (err) {
                mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mtt;
        }

        kfree(dma_list);
        mlx4_free_cmd_mailbox(dev, mailbox);

        eq->cons_index = 0;

        return err;

err_out_free_mtt:
        mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          eq->page_list[i].map);

        mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}

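/*
 * Destroy an EQ, undoing mlx4_create_eq(): reclaim it from the hardware
 * with HW2SW_EQ, then release the MTT, the queue pages and the EQ
 * number.
 */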
static void mlx4_free_eq(struct mlx4_dev *dev,
                         struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
        int i;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return;

        err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
        if (err)
                mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

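        /*
         * Debug aid, normally compiled out: change the 0 to 1 to dump
         * the EQ context returned by HW2SW_EQ.
         */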
        if (0) {
                mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                    eq->page_list[i].buf,
                                    eq->page_list[i].map);

        kfree(eq->page_list);
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
        mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
        struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
        int i;

        if (eq_table->have_irq)
                free_irq(dev->pdev->irq, dev);
        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                if (eq_table->eq[i].have_irq) {
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
                        eq_table->eq[i].have_irq = 0;
                }

        kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
                mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
                return -ENOMEM;
        }

        return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
                                    sizeof *priv->eq_table.eq, GFP_KERNEL);
        if (!priv->eq_table.eq)
                return -ENOMEM;

        return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
        kfree(mlx4_priv(dev)->eq_table.eq);
}

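/*
 * Set up the whole EQ table: allocate the doorbell map and EQ bitmap,
 * map the interrupt clear register, create one completion EQ per vector
 * plus one async EQ, request the MSI-X or shared INTx interrupt(s),
 * subscribe the async EQ to the async event mask and arm everything.
 */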
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int i;

        priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
                                         sizeof *priv->eq_table.uar_map, GFP_KERNEL);
        if (!priv->eq_table.uar_map) {
                err = -ENOMEM;
                goto err_out_free;
        }

        err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
                               dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
        if (err)
                goto err_out_free;

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                priv->eq_table.uar_map[i] = NULL;

        err = mlx4_map_clr_int(dev);
        if (err)
                goto err_out_bitmap;

        priv->eq_table.clr_mask =
                swab32(1 << (priv->eq_table.inta_pin & 31));
        priv->eq_table.clr_int  = priv->clr_base +
                (priv->eq_table.inta_pin < 32 ? 4 : 0);

        priv->eq_table.irq_names =
                kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
                        GFP_KERNEL);
        if (!priv->eq_table.irq_names) {
                err = -ENOMEM;
                goto err_out_bitmap;
        }

        for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
                err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
                                     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
                                     &priv->eq_table.eq[i]);
                if (err) {
                        --i;
                        goto err_out_unmap;
                }
        }

        err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
                             (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
                             &priv->eq_table.eq[dev->caps.num_comp_vectors]);
        if (err)
                goto err_out_comp;

        if (dev->flags & MLX4_FLAG_MSI_X) {
                const char *eq_name;

                for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
                        if (i < dev->caps.num_comp_vectors) {
                                snprintf(priv->eq_table.irq_names +
                                         i * MLX4_IRQNAME_SIZE,
                                         MLX4_IRQNAME_SIZE,
                                         "mlx4-comp-%d@pci:%s", i,
                                         pci_name(dev->pdev));
                        } else {
                                snprintf(priv->eq_table.irq_names +
                                         i * MLX4_IRQNAME_SIZE,
                                         MLX4_IRQNAME_SIZE,
                                         "mlx4-async@pci:%s",
                                         pci_name(dev->pdev));
                        }

                        eq_name = priv->eq_table.irq_names +
                                  i * MLX4_IRQNAME_SIZE;
                        err = request_irq(priv->eq_table.eq[i].irq,
                                          mlx4_msi_x_interrupt, 0, eq_name,
                                          priv->eq_table.eq + i);
                        if (err)
                                goto err_out_async;

                        priv->eq_table.eq[i].have_irq = 1;
                }
        } else {
                snprintf(priv->eq_table.irq_names,
                         MLX4_IRQNAME_SIZE,
                         DRV_NAME "@pci:%s",
                         pci_name(dev->pdev));
                err = request_irq(dev->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, priv->eq_table.irq_names, dev);
                if (err)
                        goto err_out_async;

                priv->eq_table.have_irq = 1;
        }

        err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
        if (err)
                mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                eq_set_ci(&priv->eq_table.eq[i], 1);

        return 0;

err_out_async:
        mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
        i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
        while (i >= 0) {
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
                --i;
        }
        mlx4_unmap_clr_int(dev);
        mlx4_free_irqs(dev);

err_out_bitmap:
        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
        kfree(priv->eq_table.uar_map);

        return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
                    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

        mlx4_free_irqs(dev);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);

        mlx4_unmap_clr_int(dev);

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                if (priv->eq_table.uar_map[i])
                        iounmap(priv->eq_table.uar_map[i]);

        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

        kfree(priv->eq_table.uar_map);
}