#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "mcast.h"

/**
 * rvt_driver_mcast_init - initialize multicast resources for a device
 * @rdi: rvt dev struct
 *
 * Called once for each device that registers with rdmavt.
 */
void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
{
	/*
	 * Anything that needs per-driver or per-rdi setup for multicast
	 * should be done here.
	 */
	spin_lock_init(&rdi->n_mcast_grps_lock);
}

/**
 * rvt_mcast_qp_alloc - allocate a structure linking a QP to a mcast group
 * @qp: the QP to link
 */
static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
	struct rvt_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	atomic_inc(&qp->refcount);

bail:
	return mqp;
}

static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
	struct rvt_qp *qp = mqp->qp;

	/* Notify the QP destroy path if it is waiting for this reference. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}

/**
 * rvt_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid)
{
	struct rvt_mcast *mcast;

	mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mgid = *mgid;
	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);

bail:
	return mcast;
}

static void rvt_mcast_free(struct rvt_mcast *mcast)
{
	struct rvt_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		rvt_mcast_qp_free(p);

	kfree(mcast);
}

/**
 * rvt_mcast_find - search the per-port mcast table for the given GID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid)
{
	struct rb_node *n;
	unsigned long flags;
	struct rvt_mcast *found = NULL;

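	/* Walk the rb-tree under the port lock, keyed on the raw MGID bytes. */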
	spin_lock_irqsave(&ibp->lock, flags);
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;
		struct rvt_mcast *mcast;

		mcast = rb_entry(n, struct rvt_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			atomic_inc(&mcast->refcount);
			found = mcast;
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
	return found;
}
EXPORT_SYMBOL(rvt_mcast_find);

/**
 * rvt_mcast_add - insert a mcast GID into the table and attach a QP
 * @rdi: rvt dev struct
 * @ibp: the IB port structure
 * @mcast: the mcast group to insert if not already present
 * @mqp: the QP to attach
 *
 * Return 0 if both were added, EEXIST if the GID was already in the table
 * but the QP was attached to it, ESRCH if the QP was already attached and
 * neither structure was added, or ENOMEM if the per-group attach limit or
 * the device group limit would be exceeded.
 */
static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
			 struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

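	/* Find the insertion point for this MGID, or the existing entry. */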
	while (*n) {
		struct rvt_mcast *tmcast;
		struct rvt_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct rvt_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* Search the QP list to see if this QP is already attached. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached ==
		    rdi->dparms.props.max_mcast_qp_attach) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

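	/* New group: account for it against the device-wide group limit. */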
	spin_lock(&rdi->n_mcast_grps_lock);
	if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
		spin_unlock(&rdi->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	rdi->n_mcast_grps_allocated++;
	spin_unlock(&rdi->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

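	/* The rb-tree insertion holds its own reference on the group. */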
	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}

/**
 * rvt_attach_mcast - attach a qp to a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast guid
 * @lid: multicast lid
 *
 * Return: 0 on success
 */
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *mqp;
	int ret = -ENOMEM;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	/*
	 * Allocate the data structures up front since it is better to do
	 * this outside of the spin locks and they will most likely be needed.
	 */
	mcast = rvt_mcast_alloc(gid);
	if (!mcast)
		return -ENOMEM;

	mqp = rvt_mcast_qp_alloc(qp);
	if (!mqp)
		goto bail_mcast;

	switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: OK to attach the same QP twice. */
		ret = 0;
		goto bail_mqp;
	case EEXIST:
		/* The group already existed; only mqp was attached to it. */
		ret = 0;
		goto bail_mcast;
	case ENOMEM:
		/* Exceeded the maximum number of mcast groups or attaches. */
		ret = -ENOMEM;
		goto bail_mqp;
	default:
		break;
	}

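	/* On success, both new structures are now owned by the mcast table. */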
	return 0;

bail_mqp:
	rvt_mcast_qp_free(mqp);

bail_mcast:
	rvt_mcast_free(mcast);

	return ret;
}

/**
 * rvt_detach_mcast - remove a qp from a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast guid
 * @lid: multicast lid
 *
 * Return: 0 on success
 */
int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast = NULL;
	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret = 0;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	spin_lock_irq(&ibp->lock);

	/* Find the GID in the mcast table. */
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (!n) {
			spin_unlock_irq(&ibp->lock);
			return -EINVAL;
		}

		mcast = rb_entry(n, struct rvt_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list for this QP. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);

	if (!delp)
		return -EINVAL;

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	rvt_mcast_qp_free(delp);

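	/* If the group lost its last QP, release the group itself. */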
	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		rvt_mcast_free(mcast);
		spin_lock_irq(&rdi->n_mcast_grps_lock);
		rdi->n_mcast_grps_allocated--;
		spin_unlock_irq(&rdi->n_mcast_grps_lock);
	}

	return 0;
}

/**
 * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
 * @rdi: rvt dev struct
 *
 * Return: number of ports with a non-empty mcast tree (0 if none are in use)
 */
int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
	int i;
	int in_use = 0;

	for (i = 0; i < rdi->dparms.nports; i++)
		if (rdi->ports[i]->mcast_tree.rb_node)
			in_use++;
	return in_use;
}