#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

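/* Validate CQ attributes: the requested depth must be positive and
 * within the device limit; for an existing CQ it must also be at least
 * the number of completions currently queued.
 */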
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		return -EINVAL;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_warn("cqe(%d) > max_cqe(%d)\n",
			cqe, rxe->attr.max_cqe);
		return -EINVAL;
	}

	if (cq) {
		count = queue_count(cq->queue);
		if (cqe < count) {
			pr_warn("cqe(%d) < current # elements in queue (%d)\n",
				cqe, count);
			return -EINVAL;
		}
	}

	return 0;
}

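/* Tasklet callback: invoke the consumer's completion handler unless
 * the CQ is being torn down.
 */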
static void rxe_send_complete(unsigned long data)
{
	struct rxe_cq *cq = (struct rxe_cq *)data;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

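/* Allocate the completion queue ring and, for a user CQ, fill in the
 * mmap info in uresp so the buffer can be mapped into userspace.
 */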
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;

	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe));
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		return err;
	}

	if (uresp)
		cq->is_user = 1;

	cq->is_dying = false;

	tasklet_init(&cq->comp_task, rxe_send_complete, (unsigned long)cq);

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}

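/* Resize the underlying queue buffer, preserving any completions
 * already queued, and record the new depth on success.
 */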
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}

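/* Post a completion to the queue. On overflow, report an
 * IB_EVENT_CQ_ERR asynchronous event and return -EBUSY; otherwise
 * schedule the completion tasklet if a notification was requested.
 */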
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	if (unlikely(queue_full(cq->queue))) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));

	/* make sure all changes to the CQ are written before we update the
	 * producer pointer
	 */
	smp_wmb();

	advance_producer(cq->queue);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}

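/* Mark the CQ as dying so a tasklet run that races with destroy will
 * not call back into the consumer (see rxe_send_complete()).
 */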
void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

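/* Pool cleanup hook: release the queue buffer, if one was allocated,
 * when the CQ object is finalized.
 */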
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}