linux/drivers/infiniband/sw/rxe/rxe_cq.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

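/*
 * Validate the requested CQ depth: it must be positive, must not exceed
 * the device's max_cqe limit and, when resizing an existing CQ, must not
 * be smaller than the number of completions currently in the queue.
 */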
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
                    int cqe, int comp_vector)
{
        int count;

        if (cqe <= 0) {
                pr_warn("cqe(%d) <= 0\n", cqe);
                goto err1;
        }

        if (cqe > rxe->attr.max_cqe) {
                pr_warn("cqe(%d) > max_cqe(%d)\n",
                        cqe, rxe->attr.max_cqe);
                goto err1;
        }

        if (cq) {
                if (cq->is_user)
                        count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
                else
                        count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);

                if (cqe < count) {
                        pr_warn("cqe(%d) < current # elements in queue (%d)\n",
                                cqe, count);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

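/*
 * Tasklet handler that delivers the completion notification upcall,
 * unless the CQ has been marked as dying by rxe_cq_disable().
 */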
static void rxe_send_complete(struct tasklet_struct *t)
{
        struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);
        if (cq->is_dying) {
                spin_unlock_irqrestore(&cq->cq_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

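/*
 * Create the completion queue: allocate the circular queue buffer,
 * expose its mmap info to userspace when a uresp is supplied, and set
 * up the CQ lock and the completion tasklet.
 */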
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
                     int comp_vector, struct ib_udata *udata,
                     struct rxe_create_cq_resp __user *uresp)
{
        int err;
        enum queue_type type;

        type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
        cq->queue = rxe_queue_init(rxe, &cqe,
                        sizeof(struct rxe_cqe), type);
        if (!cq->queue) {
                pr_warn("unable to create cq\n");
                return -ENOMEM;
        }

        err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
                           cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
        if (err) {
                vfree(cq->queue->buf);
                kfree(cq->queue);
                return err;
        }

        if (uresp)
                cq->is_user = 1;

        cq->is_dying = false;

        tasklet_setup(&cq->comp_task, rxe_send_complete);

        spin_lock_init(&cq->cq_lock);
        cq->ibcq.cqe = cqe;
        return 0;
}

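/*
 * Resize the underlying queue buffer; on success record the new depth
 * in ibcq.cqe.
 */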
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
                        struct rxe_resize_cq_resp __user *uresp,
                        struct ib_udata *udata)
{
        int err;

        err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
                               sizeof(struct rxe_cqe), udata,
                               uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
        if (!err)
                cq->ibcq.cqe = cqe;

        return err;
}

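/*
 * Post a completion to the CQ. If the queue is full, report
 * IB_EVENT_CQ_ERR to the event handler (if any) and return -EBUSY.
 * Otherwise copy the CQE at the producer index, advance the producer,
 * and schedule the completion tasklet when the pending notification
 * request (next completion, or solicited only) is satisfied.
 */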
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
        struct ib_event ev;
        unsigned long flags;
        int full;
        void *addr;

        spin_lock_irqsave(&cq->cq_lock, flags);

        if (cq->is_user)
                full = queue_full(cq->queue, QUEUE_TYPE_TO_USER);
        else
                full = queue_full(cq->queue, QUEUE_TYPE_KERNEL);

        if (unlikely(full)) {
                spin_unlock_irqrestore(&cq->cq_lock, flags);
                if (cq->ibcq.event_handler) {
                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }

                return -EBUSY;
        }

        if (cq->is_user)
                addr = producer_addr(cq->queue, QUEUE_TYPE_TO_USER);
        else
                addr = producer_addr(cq->queue, QUEUE_TYPE_KERNEL);

        memcpy(addr, cqe, sizeof(*cqe));

        if (cq->is_user)
                advance_producer(cq->queue, QUEUE_TYPE_TO_USER);
        else
                advance_producer(cq->queue, QUEUE_TYPE_KERNEL);

        spin_unlock_irqrestore(&cq->cq_lock, flags);

        if ((cq->notify == IB_CQ_NEXT_COMP) ||
            (cq->notify == IB_CQ_SOLICITED && solicited)) {
                cq->notify = 0;
                tasklet_schedule(&cq->comp_task);
        }

        return 0;
}

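/*
 * Mark the CQ as dying so that no further completion upcalls are
 * delivered from the tasklet.
 */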
void rxe_cq_disable(struct rxe_cq *cq)
{
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);
        cq->is_dying = true;
        spin_unlock_irqrestore(&cq->cq_lock, flags);
}

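/*
 * Pool cleanup callback: free the queue buffer if it was allocated.
 */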
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
        struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

        if (cq->queue)
                rxe_queue_cleanup(cq->queue);
}