linux/drivers/infiniband/core/cq.c
/*
 * Copyright (c) 2015 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH                   16

/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ              256
#define IB_POLL_BUDGET_WORKQUEUE        65536

#define IB_POLL_FLAGS \
        (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)

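/*
 * Poll the CQ in batches of up to IB_POLL_BATCH WCs and dispatch each
 * completion to its ib_cqe ->done handler.  Returns the number of WCs
 * processed; stops once ib_poll_cq returns a partial batch (the CQ is
 * drained) or the budget is exhausted.  A budget of -1 means no limit.
 */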
static int __ib_process_cq(struct ib_cq *cq, int budget)
{
        int i, n, completed = 0;

        while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) {
                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = &cq->wc[i];

                        if (wc->wr_cqe)
                                wc->wr_cqe->done(cq, wc);
                        else
                                WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
                }

                completed += n;

                if (n != IB_POLL_BATCH ||
                    (budget != -1 && completed >= budget))
                        break;
        }

        return completed;
}

/**
 * ib_process_cq_direct - process a CQ in caller context
 * @cq:         CQ to process
 * @budget:     number of CQEs to poll for
 *
 * This function is used to process all outstanding CQ entries on a
 * %IB_POLL_DIRECT CQ.  It does not offload CQ processing to a different
 * context and does not ask for completion interrupts from the HCA.
 *
 * Note: for compatibility reasons -1 can be passed in %budget for unlimited
 * polling.  Do not use this feature in new code, it will be removed soon.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
        WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT);

        return __ib_process_cq(cq, budget);
}
EXPORT_SYMBOL(ib_process_cq_direct);
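
/*
 * Illustrative usage sketch (not part of the original file; the loop and
 * the batch size of 16 below are hypothetical).  A ULP drains an
 * IB_POLL_DIRECT CQ from its own context, e.g. from its send path:
 *
 *        int completed;
 *
 *        do {
 *                completed = ib_process_cq_direct(cq, 16);
 *        } while (completed > 0);
 */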

static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
        WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

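/*
 * irq_poll (softirq) callback: process up to @budget CQEs.  If the budget
 * was not exhausted the CQ is drained, so stop polling and re-arm the
 * completion notification; if completions raced with the re-arm
 * (ib_req_notify_cq returns > 0 with IB_CQ_REPORT_MISSED_EVENTS),
 * reschedule the poll immediately.
 */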
static int ib_poll_handler(struct irq_poll *iop, int budget)
{
        struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
        int completed;

        completed = __ib_process_cq(cq, budget);
        if (completed < budget) {
                irq_poll_complete(&cq->iop);
                if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
                        irq_poll_sched(&cq->iop);
        }

        return completed;
}

static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
        irq_poll_sched(&cq->iop);
}

static void ib_cq_poll_work(struct work_struct *work)
{
        struct ib_cq *cq = container_of(work, struct ib_cq, work);
        int completed;

        completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE);
        if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
            ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
                queue_work(ib_comp_wq, &cq->work);
}

static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
        queue_work(ib_comp_wq, &cq->work);
}

/**
 * ib_alloc_cq - allocate a completion queue
 * @dev:                device to allocate the CQ for
 * @private:            driver private data, accessible from cq->cq_context
 * @nr_cqe:             number of CQEs to allocate
 * @comp_vector:        HCA completion vector for this CQ
 * @poll_ctx:           context to poll the CQ from.
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context.  The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
                int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx)
{
        struct ib_cq_init_attr cq_attr = {
                .cqe            = nr_cqe,
                .comp_vector    = comp_vector,
        };
        struct ib_cq *cq;
        int ret = -ENOMEM;

        cq = dev->create_cq(dev, &cq_attr, NULL, NULL);
        if (IS_ERR(cq))
                return cq;

        cq->device = dev;
        cq->uobject = NULL;
        cq->event_handler = NULL;
        cq->cq_context = private;
        cq->poll_ctx = poll_ctx;
        atomic_set(&cq->usecnt, 0);

        cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
        if (!cq->wc)
                goto out_destroy_cq;

        switch (cq->poll_ctx) {
        case IB_POLL_DIRECT:
                cq->comp_handler = ib_cq_completion_direct;
                break;
        case IB_POLL_SOFTIRQ:
                cq->comp_handler = ib_cq_completion_softirq;

                irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
                ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
                break;
        case IB_POLL_WORKQUEUE:
                cq->comp_handler = ib_cq_completion_workqueue;
                INIT_WORK(&cq->work, ib_cq_poll_work);
                ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
                break;
        default:
                ret = -EINVAL;
                goto out_free_wc;
        }

        return cq;

out_free_wc:
        kfree(cq->wc);
out_destroy_cq:
        cq->device->destroy_cq(cq);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_cq);
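
/*
 * Illustrative usage sketch (not part of the original file; the structure,
 * handler and variable names below are hypothetical).  A ULP embeds a
 * struct ib_cqe in its per-request structure, points wr->wr_cqe at it and
 * receives completions through the ->done callback:
 *
 *        struct example_req {
 *                struct ib_cqe   cqe;
 *        };
 *
 *        static void example_done(struct ib_cq *cq, struct ib_wc *wc)
 *        {
 *                struct example_req *req =
 *                        container_of(wc->wr_cqe, struct example_req, cqe);
 *
 *                if (wc->status != IB_WC_SUCCESS)
 *                        pr_err("request failed: %d\n", wc->status);
 *                // ... complete or reuse req ...
 *        }
 *
 *        cq = ib_alloc_cq(dev, ulp_ctx, 128, 0, IB_POLL_WORKQUEUE);
 *        if (IS_ERR(cq))
 *                return PTR_ERR(cq);
 *
 *        req->cqe.done = example_done;
 *        wr.wr_cqe = &req->cqe;          // instead of setting wr.wr_id
 */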

/**
 * ib_free_cq - free a completion queue
 * @cq:         completion queue to free.
 */
void ib_free_cq(struct ib_cq *cq)
{
        int ret;

        if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
                return;

        switch (cq->poll_ctx) {
        case IB_POLL_DIRECT:
                break;
        case IB_POLL_SOFTIRQ:
                irq_poll_disable(&cq->iop);
                break;
        case IB_POLL_WORKQUEUE:
                flush_work(&cq->work);
                break;
        default:
                WARN_ON_ONCE(1);
        }

        kfree(cq->wc);
        ret = cq->device->destroy_cq(cq);
        WARN_ON_ONCE(ret);
}
EXPORT_SYMBOL(ib_free_cq);
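
/*
 * Illustrative teardown sketch (not part of the original file; names are
 * hypothetical).  All users of the CQ must be gone before it is freed,
 * otherwise the usecnt check above fires and the CQ is leaked:
 *
 *        ib_destroy_qp(qp);      // drops the QP's reference on the CQ
 *        ib_free_cq(cq);
 */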