1
2
3
4
5
6#include <linux/slab.h>
7#include <linux/sched.h>
8#include <linux/rculist.h>
9#include <rdma/rdma_vt.h>
10#include <rdma/rdmavt_qp.h>
11
12#include "mcast.h"
13
14
15
16
17
18
19
/**
 * rvt_driver_mcast_init - initialize multicast resources for one device
 * @rdi: rvt device info structure
 *
 * Called once per device that registers with rdmavt.  Only the lock
 * guarding the allocated-group counter needs setup here; the per-port
 * mcast trees live in the rvt_ibport structures.
 */
void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
{
	/*
	 * Anything that needs setup for multicast on a per driver or device
	 * basis should be done in here.
	 */
	spin_lock_init(&rdi->n_mcast_grps_lock);
}
28
29
30
31
32
33static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
34{
35 struct rvt_mcast_qp *mqp;
36
37 mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
38 if (!mqp)
39 goto bail;
40
41 mqp->qp = qp;
42 rvt_get_qp(qp);
43
44bail:
45 return mqp;
46}
47
48static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
49{
50 struct rvt_qp *qp = mqp->qp;
51
52
53 rvt_put_qp(qp);
54
55 kfree(mqp);
56}
57
58
59
60
61
62
63
64
65static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid, u16 lid)
66{
67 struct rvt_mcast *mcast;
68
69 mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
70 if (!mcast)
71 goto bail;
72
73 mcast->mcast_addr.mgid = *mgid;
74 mcast->mcast_addr.lid = lid;
75
76 INIT_LIST_HEAD(&mcast->qp_list);
77 init_waitqueue_head(&mcast->wait);
78 atomic_set(&mcast->refcount, 0);
79
80bail:
81 return mcast;
82}
83
/*
 * rvt_mcast_free - free a multicast group record and all attached QPs
 * @mcast: the group to free
 *
 * Frees every QP wrapper still on the group's list (dropping each QP
 * reference) and then the group itself.  The caller must guarantee no
 * other context can still reach @mcast (see rvt_detach_mcast()).
 */
static void rvt_mcast_free(struct rvt_mcast *mcast)
{
	struct rvt_mcast_qp *p, *tmp;

	/* _safe variant: rvt_mcast_qp_free() frees the entry we hold. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		rvt_mcast_qp_free(p);

	kfree(mcast);
}
93
94
95
96
97
98
99
100
101
102
103
104
105
/**
 * rvt_mcast_find - search the port's multicast rb-tree for a group
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 * @lid: the multicast LID portion of the multicast address
 *
 * On success the group's reference count is incremented; the caller is
 * responsible for dropping that reference when done.
 *
 * Return: the matching group, or NULL if not found (including the case
 * where the MGID matches but the LID does not).
 */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid)
{
	struct rb_node *n;
	unsigned long flags;
	struct rvt_mcast *found = NULL;

	/* irqsave: this can be called from interrupt context (packet rx). */
	spin_lock_irqsave(&ibp->lock, flags);
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;
		struct rvt_mcast *mcast;

		mcast = rb_entry(n, struct rvt_mcast, rb_node);

		/* Tree is ordered by raw MGID bytes. */
		ret = memcmp(mgid->raw, mcast->mcast_addr.mgid.raw,
			     sizeof(*mgid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			/* MGID matches; the LID must match as well. */
			if (mcast->mcast_addr.lid == lid) {
				/*
				 * Take a ref under the lock so the group
				 * cannot be freed while the caller uses it.
				 */
				atomic_inc(&mcast->refcount);
				found = mcast;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
	return found;
}
EXPORT_SYMBOL(rvt_mcast_find);
140
141
142
143
144
145
146
147
148
149
150
/**
 * rvt_mcast_add - insert mcast GID into the port's rb-tree and attach a QP
 * @rdi: rvt device info structure
 * @ibp: the IB port on whose tree the group lives
 * @mcast: a freshly allocated group record for the (MGID, LID) pair
 * @mqp: the QP wrapper to attach
 *
 * NOTE: the status codes returned here are deliberately POSITIVE errno
 * values used as an in-band protocol with rvt_attach_mcast(), which maps
 * them to the proper negative codes and decides which structures to free:
 *
 * Return: 0 if both @mcast and @mqp were inserted.  EEXIST if the group
 * already existed and only @mqp was attached (caller frees @mcast).
 * ESRCH if the QP was already attached (nothing inserted).  EINVAL if
 * the MGID was found but the LID did not match.  ENOMEM if a device
 * limit (QPs per group, or groups per device) was reached.
 */
static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
			 struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

	/* Standard rb-tree descent, remembering the parent link for insert. */
	while (*n) {
		struct rvt_mcast *tmcast;
		struct rvt_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct rvt_mcast, rb_node);

		ret = memcmp(mcast->mcast_addr.mgid.raw,
			     tmcast->mcast_addr.mgid.raw,
			     sizeof(mcast->mcast_addr.mgid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* MGID matches: same group iff the LID matches too. */
		if (tmcast->mcast_addr.lid != mcast->mcast_addr.lid) {
			ret = EINVAL;
			goto bail;
		}

		/* Search the existing group for this QP. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached ==
		    rdi->dparms.props.max_mcast_qp_attach) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		/* Publish the new attachment to RCU readers. */
		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	/* New group: enforce the per-device group limit first. */
	spin_lock(&rdi->n_mcast_grps_lock);
	if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
		spin_unlock(&rdi->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	rdi->n_mcast_grps_allocated++;
	spin_unlock(&rdi->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	/* The tree itself holds one reference on the group. */
	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}
229
230
231
232
233
234
235
236
237
/**
 * rvt_attach_mcast - attach a qp to a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast gid
 * @lid: multicast lid
 *
 * Speculatively allocates both a group record and a QP wrapper, then
 * lets rvt_mcast_add() decide (under the port lock) which of them are
 * actually needed; the unused ones are freed on the bail paths below.
 *
 * Return: 0 on success (including the already-attached case), -EINVAL
 * for special QPs (0/1) or a QP in RESET, -ENOMEM on allocation failure
 * or when a device limit is reached.
 */
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *mqp;
	int ret = -ENOMEM;

	/* QPs 0 and 1 cannot join multicast groups; nor can a reset QP. */
	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	/*
	 * Allocate both structures up front, outside the lock; one or both
	 * may turn out to be unneeded and are freed below.
	 */
	mcast = rvt_mcast_alloc(gid, lid);
	if (!mcast)
		return -ENOMEM;

	mqp = rvt_mcast_qp_alloc(qp);
	if (!mqp)
		goto bail_mcast;

	/* rvt_mcast_add() returns POSITIVE status codes; map them here. */
	switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: OK to attach the same QP twice. */
		ret = 0;
		goto bail_mqp;
	case EEXIST:
		/* The mcast wasn't used; mqp is now on the group's list. */
		ret = 0;
		goto bail_mcast;
	case ENOMEM:
		/* Exceeded the maximum number of mcast groups or QPs. */
		ret = -ENOMEM;
		goto bail_mqp;
	case EINVAL:
		/* Invalid MGID/MLID pairing. */
		ret = -EINVAL;
		goto bail_mqp;
	default:
		break;
	}

	return 0;

bail_mqp:
	rvt_mcast_qp_free(mqp);

bail_mcast:
	rvt_mcast_free(mcast);

	return ret;
}
292
293
294
295
296
297
298
299
300
/**
 * rvt_detach_mcast - remove a qp from a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast gid
 * @lid: multicast lid
 *
 * Removes the QP's attachment from the group under the port lock, then
 * (outside the lock) waits for concurrent RCU readers to drop their
 * references before freeing.  If this was the last QP in the group, the
 * group itself is unlinked from the tree and freed as well.
 *
 * Return: 0 on success, -EINVAL for special QPs, an unknown group, a
 * LID mismatch, or a QP that was not attached.
 */
int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast = NULL;
	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret = 0;

	/* QPs 0 and 1 never joined a group in the first place. */
	if (ibqp->qp_num <= 1)
		return -EINVAL;

	spin_lock_irq(&ibp->lock);

	/* Find the (MGID, LID) group in this port's rb-tree. */
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (!n) {
			spin_unlock_irq(&ibp->lock);
			return -EINVAL;
		}

		mcast = rb_entry(n, struct rvt_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mcast_addr.mgid.raw,
			     sizeof(*gid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			/* MGID matches; the LID must match too. */
			if (mcast->mcast_addr.lid != lid) {
				spin_unlock_irq(&ibp->lock);
				return -EINVAL;
			}
			break;
		}
	}

	/* Search the group's QP list for this QP. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;

		/*
		 * Unlink from the RCU list now; the actual free is
		 * deferred until after the refcount wait below so
		 * in-flight readers finish first.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* Last QP: pull the whole group out of the tree. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);

	/* QP was never attached to this group. */
	if (!delp)
		return -EINVAL;

	/*
	 * Wait for any list walks (rvt_mcast_find() users) to finish
	 * before freeing the detached entry.  <= 1 because the tree's
	 * own reference may still be held when !last.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	rvt_mcast_qp_free(delp);

	if (last) {
		/* Drop the tree's reference, wait for zero, then free. */
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		rvt_mcast_free(mcast);
		spin_lock_irq(&rdi->n_mcast_grps_lock);
		rdi->n_mcast_grps_allocated--;
		spin_unlock_irq(&rdi->n_mcast_grps_lock);
	}

	return 0;
}
385
386
387
388
389
390
391
392int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
393{
394 int i;
395 int in_use = 0;
396
397 for (i = 0; i < rdi->dparms.nports; i++)
398 if (rdi->ports[i]->mcast_tree.rb_node)
399 in_use++;
400 return in_use;
401}
402