#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "mcast.h"
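
/**
 * rvt_driver_mcast_init - initialize multicast resources for a device
 * @rdi: rvt dev struct
 *
 * Called once for each device that registers with rdmavt.
 */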
void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
{
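	/*
	 * Per-device multicast setup: only the multicast group count
	 * lock needs initializing here.
	 */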
	spin_lock_init(&rdi->n_mcast_grps_lock);
}
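
/**
 * rvt_mcast_qp_alloc - allocate a per-QP multicast list entry
 * @qp: the QP to link into a multicast group
 *
 * Takes a reference on @qp; released by rvt_mcast_qp_free().
 */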
static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
	struct rvt_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	rvt_get_qp(qp);

bail:
	return mqp;
}

static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
	struct rvt_qp *qp = mqp->qp;
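
	/* Drop our QP reference; wakes the QP destroy path if it is waiting. */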
	rvt_put_qp(qp);

	kfree(mqp);
}
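
/**
 * rvt_mcast_alloc - allocate a multicast group structure
 * @mgid: the multicast GID
 * @lid: the multicast LID (host order)
 *
 * A list of attached QPs will hang off of this structure.
 */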
static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid, u16 lid)
{
	struct rvt_mcast *mcast;

	mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mcast_addr.mgid = *mgid;
	mcast->mcast_addr.lid = lid;

	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);

bail:
	return mcast;
}

static void rvt_mcast_free(struct rvt_mcast *mcast)
{
	struct rvt_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		rvt_mcast_qp_free(p);

	kfree(mcast);
}
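
/**
 * rvt_mcast_find - search a port's multicast tree for the given MGID/MLID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 * @lid: the multicast LID portion of the multicast address (host order)
 *
 * One MLID may be shared by multiple MGIDs, but a given MGID must always
 * map to a single MLID.
 *
 * The caller is responsible for decrementing the reference count if a
 * group is found.
 *
 * Return: the multicast group, or NULL if not found.
 */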
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid)
{
	struct rb_node *n;
	unsigned long flags;
	struct rvt_mcast *found = NULL;

	spin_lock_irqsave(&ibp->lock, flags);
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;
		struct rvt_mcast *mcast;

		mcast = rb_entry(n, struct rvt_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mcast_addr.mgid.raw,
			     sizeof(*mgid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
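			/* MGID matches; the MLID must match as well. */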
			if (mcast->mcast_addr.lid == lid) {
				atomic_inc(&mcast->refcount);
				found = mcast;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
	return found;
}
EXPORT_SYMBOL(rvt_mcast_find);
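
/**
 * rvt_mcast_add - insert an mcast GID into a port's table and attach a QP
 * @rdi: rvt dev struct
 * @ibp: the ibport data struct
 * @mcast: the new mcast GID structure
 * @mqp: the QP to attach
 *
 * Return: 0 if both were added, EEXIST if the GID was already in the table
 * but the QP was attached to it, ESRCH if the QP was already attached and
 * neither structure was added, ENOMEM if a resource limit was exceeded, or
 * EINVAL if the MGID is already bound to a different MLID.
 */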
static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
			 struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

	while (*n) {
		struct rvt_mcast *tmcast;
		struct rvt_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct rvt_mcast, rb_node);

		ret = memcmp(mcast->mcast_addr.mgid.raw,
			     tmcast->mcast_addr.mgid.raw,
			     sizeof(mcast->mcast_addr.mgid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}
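
		/* Found the MGID; the MLID must match as well. */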
		if (tmcast->mcast_addr.lid != mcast->mcast_addr.lid) {
			ret = EINVAL;
			goto bail;
		}
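
		/* Search the QP list to see if this QP is already attached. */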
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached ==
		    rdi->dparms.props.max_mcast_qp_attach) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	spin_lock(&rdi->n_mcast_grps_lock);
	if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
		spin_unlock(&rdi->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	rdi->n_mcast_grps_allocated++;
	spin_unlock(&rdi->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}
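
/**
 * rvt_attach_mcast - attach a QP to a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast group gid
 * @lid: multicast group lid in host byte order
 *
 * Return: 0 on success
 */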
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *mqp;
	int ret = -ENOMEM;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;
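
	/*
	 * Allocate the new group and QP list entry up front, outside of any
	 * locks; whichever turns out to be unneeded is freed below based on
	 * rvt_mcast_add()'s return value.
	 */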
	mcast = rvt_mcast_alloc(gid, lid);
	if (!mcast)
		return -ENOMEM;

	mqp = rvt_mcast_qp_alloc(qp);
	if (!mqp)
		goto bail_mcast;

	switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
	case ESRCH:
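		/* The QP was already attached; free both new structures. */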
		ret = 0;
		goto bail_mqp;
	case EEXIST:
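		/* The group already existed; only the new mcast struct is unused. */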
		ret = 0;
		goto bail_mcast;
	case ENOMEM:
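		/* Exceeded the maximum number of mcast groups or attached QPs. */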
		ret = -ENOMEM;
		goto bail_mqp;
	case EINVAL:
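		/* Invalid MGID/MLID pair: the MGID is bound to a different MLID. */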
		ret = -EINVAL;
		goto bail_mqp;
	default:
		break;
	}

	return 0;

bail_mqp:
	rvt_mcast_qp_free(mqp);

bail_mcast:
	rvt_mcast_free(mcast);

	return ret;
}
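
/**
 * rvt_detach_mcast - remove a QP from a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast group gid
 * @lid: multicast group lid in host byte order
 *
 * Return: 0 on success
 */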
int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast = NULL;
	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret = 0;

	if (ibqp->qp_num <= 1)
		return -EINVAL;

	spin_lock_irq(&ibp->lock);
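
	/* Find the multicast GID/MLID in this port's tree. */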
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (!n) {
			spin_unlock_irq(&ibp->lock);
			return -EINVAL;
		}

		mcast = rb_entry(n, struct rvt_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mcast_addr.mgid.raw,
			     sizeof(*gid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
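			/* MGID matches; the MLID must match as well. */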
			if (mcast->mcast_addr.lid != lid) {
				spin_unlock_irq(&ibp->lock);
				return -EINVAL;
			}
			break;
		}
	}
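
	/* Search the group's QP list for this QP. */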
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
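		/*
		 * Found it: unlink the entry now, but don't free it until
		 * any concurrent list walkers are done with it.
		 */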
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;
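
		/* If this was the last attached QP, remove the group from the tree. */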
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);

	if (!delp)
		return -EINVAL;
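
	/*
	 * Wait for any list walkers to finish before freeing the
	 * removed list element.
	 */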
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	rvt_mcast_qp_free(delp);

	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		rvt_mcast_free(mcast);
		spin_lock_irq(&rdi->n_mcast_grps_lock);
		rdi->n_mcast_grps_allocated--;
		spin_unlock_irq(&rdi->n_mcast_grps_lock);
	}

	return 0;
}
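
/**
 * rvt_mcast_tree_empty - determine if any multicast groups remain attached
 * @rdi: rvt dev struct
 *
 * Return: the number of ports with a non-empty multicast tree; 0 means all
 * trees are empty.
 */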
int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
	int i;
	int in_use = 0;

	for (i = 0; i < rdi->dparms.nports; i++)
		if (rdi->ports[i]->mcast_tree.rb_node)
			in_use++;
	return in_use;
}