1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/string.h>
35#include <linux/etherdevice.h>
36
37#include <linux/mlx4/cmd.h>
38
39#include "mlx4.h"
40
/* Low 24 bits of an MGM member entry hold the QP number. */
#define MGM_QPN_MASK 0x00FFFFFF
/* Bit set in a member entry to block multicast loopback for that QP. */
#define MGM_BLCK_LB_BIT 30

/* All-zero GID; used to recognize unused/invalid MGM entries. */
static const u8 zero_gid[16];
45
/*
 * Read the MGM entry at @index from the device into @mailbox
 * via the READ_MCG firmware command.  Returns 0 or a negative errno.
 */
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A);
}
52
/*
 * Write the MGM entry in @mailbox to the device at @index
 * via the WRITE_MCG firmware command.  Returns 0 or a negative errno.
 */
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A);
}
59
60static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
61 struct mlx4_cmd_mailbox *mailbox)
62{
63 u32 in_mod;
64
65 in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
66 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
67 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
68}
69
70static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
71 u16 *hash, u8 op_mod)
72{
73 u64 imm;
74 int err;
75
76 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
77 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
78
79 if (!err)
80 *hash = imm;
81
82 return err;
83}
84
85static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
86 enum mlx4_steer_type steer,
87 u32 qpn)
88{
89 struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
90 struct mlx4_promisc_qp *pqp;
91
92 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
93 if (pqp->qpn == qpn)
94 return pqp;
95 }
96
97 return NULL;
98}
99
100
101
102
103
/*
 * Register a newly-created steering entry (MGM index @index) and attach
 * all promiscuous QPs of (vep_num, port, steer) to it, so they keep
 * receiving traffic destined to this address.  If @qpn is itself
 * promiscuous, it is recorded on the entry's duplicates list so a later
 * detach keeps the bookkeeping consistent.
 *
 * Returns 0 on success or a negative errno; on failure the new entry
 * (and any duplicate record) is torn down again.
 * Caller must hold the mcg table mutex.
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;
	u8 pf_num;

	/* pf_num encodes (vep, port) on multi-port devices */
	pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
	s_steer = &mlx4_priv(dev)->steer[pf_num];
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted into the duplicates list
	 */
	pqp = get_promisc_qp(dev, pf_num, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc QPs exist for this steer type, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now add all the promisc QPs to the new steering entry, since
	 * they should also receive packets destined to this address
	 */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	/* low 24 bits: member count; top 2 bits: protocol */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* @qpn is already a member of this entry */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == MLX4_QP_PER_MGM) {
			/* entry is full */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the promisc qpn to the entry's member list */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}

	/* update the member count and write the entry back */
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}
193
194
195static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
196 enum mlx4_steer_type steer,
197 unsigned int index, u32 qpn)
198{
199 struct mlx4_steer *s_steer;
200 struct mlx4_steer_index *tmp_entry, *entry = NULL;
201 struct mlx4_promisc_qp *pqp;
202 struct mlx4_promisc_qp *dqp;
203 u8 pf_num;
204
205 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
206 s_steer = &mlx4_priv(dev)->steer[pf_num];
207
208 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
209 if (!pqp)
210 return 0;
211
212 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
213 if (tmp_entry->index == index) {
214 entry = tmp_entry;
215 break;
216 }
217 }
218 if (unlikely(!entry)) {
219 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
220 return -EINVAL;
221 }
222
223
224
225
226 list_for_each_entry(dqp, &entry->duplicates, list) {
227 if (qpn == dqp->qpn)
228 return 0;
229 }
230
231
232 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
233 if (!dqp)
234 return -ENOMEM;
235 dqp->qpn = qpn;
236 list_add_tail(&dqp->list, &entry->duplicates);
237
238 return 0;
239}
240
241
242
243static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
244 enum mlx4_steer_type steer,
245 unsigned int index, u32 qpn)
246{
247 struct mlx4_steer *s_steer;
248 struct mlx4_steer_index *tmp_entry, *entry = NULL;
249 struct mlx4_promisc_qp *dqp, *tmp_dqp;
250 u8 pf_num;
251
252 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
253 s_steer = &mlx4_priv(dev)->steer[pf_num];
254
255
256 if (!get_promisc_qp(dev, pf_num, steer, qpn))
257 return false;
258
259
260
261 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
262 if (tmp_entry->index == index) {
263 entry = tmp_entry;
264 break;
265 }
266 }
267 if (unlikely(!entry)) {
268 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
269 return false;
270 }
271 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
272 if (dqp->qpn == qpn) {
273 list_del(&dqp->list);
274 kfree(dqp);
275 }
276 }
277 return true;
278}
279
280
/*
 * Decide whether the steering entry at MGM index @index may be removed:
 * it may when every member QP is either promiscuous or the QP being
 * detached (@tqpn).  When removable and the entry tracks no duplicates,
 * the bookkeeping record is freed here as well.
 *
 * Returns true when the caller may invalidate the MGM entry.
 * Caller must hold the mcg table mutex.
 */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;
	u8 pf_num;

	/* pf_num encodes (vep, port) on multi-port devices */
	pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
	s_steer = &mlx4_priv(dev)->steer[pf_num];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
			/* a non-promisc QP remains; entry must stay */
			goto out;
		}
	}

	/* every member is promisc (or is @tqpn): the entry can go */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* duplicates still reference this entry;
				 * it cannot be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
333
334static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
335 enum mlx4_steer_type steer, u32 qpn)
336{
337 struct mlx4_steer *s_steer;
338 struct mlx4_cmd_mailbox *mailbox;
339 struct mlx4_mgm *mgm;
340 struct mlx4_steer_index *entry;
341 struct mlx4_promisc_qp *pqp;
342 struct mlx4_promisc_qp *dqp;
343 u32 members_count;
344 u32 prot;
345 int i;
346 bool found;
347 int last_index;
348 int err;
349 u8 pf_num;
350 struct mlx4_priv *priv = mlx4_priv(dev);
351 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
352 s_steer = &mlx4_priv(dev)->steer[pf_num];
353
354 mutex_lock(&priv->mcg_table.mutex);
355
356 if (get_promisc_qp(dev, pf_num, steer, qpn)) {
357 err = 0;
358 goto out_mutex;
359 }
360
361 pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
362 if (!pqp) {
363 err = -ENOMEM;
364 goto out_mutex;
365 }
366 pqp->qpn = qpn;
367
368 mailbox = mlx4_alloc_cmd_mailbox(dev);
369 if (IS_ERR(mailbox)) {
370 err = -ENOMEM;
371 goto out_alloc;
372 }
373 mgm = mailbox->buf;
374
375
376
377
378 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
379 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
380 if (err)
381 goto out_mailbox;
382
383 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
384 prot = be32_to_cpu(mgm->members_count) >> 30;
385 found = false;
386 for (i = 0; i < members_count; i++) {
387 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
388
389 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
390 if (!dqp)
391 goto out_mailbox;
392 dqp->qpn = qpn;
393 list_add_tail(&dqp->list, &entry->duplicates);
394 found = true;
395 }
396 }
397 if (!found) {
398
399 if (members_count == MLX4_QP_PER_MGM) {
400
401 err = -ENOMEM;
402 goto out_mailbox;
403 }
404 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
405 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
406 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
407 if (err)
408 goto out_mailbox;
409 }
410 last_index = entry->index;
411 }
412
413
414 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
415
416 memset(mgm, 0, sizeof *mgm);
417 members_count = 0;
418 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
419 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
420 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
421
422 err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
423 if (err)
424 goto out_list;
425
426 mlx4_free_cmd_mailbox(dev, mailbox);
427 mutex_unlock(&priv->mcg_table.mutex);
428 return 0;
429
430out_list:
431 list_del(&pqp->list);
432out_mailbox:
433 mlx4_free_cmd_mailbox(dev, mailbox);
434out_alloc:
435 kfree(pqp);
436out_mutex:
437 mutex_unlock(&priv->mcg_table.mutex);
438 return err;
439}
440
441static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
442 enum mlx4_steer_type steer, u32 qpn)
443{
444 struct mlx4_priv *priv = mlx4_priv(dev);
445 struct mlx4_steer *s_steer;
446 struct mlx4_cmd_mailbox *mailbox;
447 struct mlx4_mgm *mgm;
448 struct mlx4_steer_index *entry;
449 struct mlx4_promisc_qp *pqp;
450 struct mlx4_promisc_qp *dqp;
451 u32 members_count;
452 bool found;
453 bool back_to_list = false;
454 int loc, i;
455 int err;
456 u8 pf_num;
457
458 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
459 s_steer = &mlx4_priv(dev)->steer[pf_num];
460 mutex_lock(&priv->mcg_table.mutex);
461
462 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
463 if (unlikely(!pqp)) {
464 mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
465
466 err = 0;
467 goto out_mutex;
468 }
469
470
471 list_del(&pqp->list);
472
473
474 mailbox = mlx4_alloc_cmd_mailbox(dev);
475 if (IS_ERR(mailbox)) {
476 err = -ENOMEM;
477 back_to_list = true;
478 goto out_list;
479 }
480 mgm = mailbox->buf;
481 members_count = 0;
482 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
483 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
484 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
485
486 err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
487 if (err)
488 goto out_mailbox;
489
490
491 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
492 found = false;
493 list_for_each_entry(dqp, &entry->duplicates, list) {
494 if (dqp->qpn == qpn) {
495 found = true;
496 break;
497 }
498 }
499 if (found) {
500
501
502 list_del(&dqp->list);
503 kfree(dqp);
504 } else {
505 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
506 if (err)
507 goto out_mailbox;
508 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
509 for (loc = -1, i = 0; i < members_count; ++i)
510 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
511 loc = i;
512
513 mgm->members_count = cpu_to_be32(--members_count |
514 (MLX4_PROT_ETH << 30));
515 mgm->qp[loc] = mgm->qp[i - 1];
516 mgm->qp[i - 1] = 0;
517
518 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
519 if (err)
520 goto out_mailbox;
521 }
522
523 }
524
525out_mailbox:
526 mlx4_free_cmd_mailbox(dev, mailbox);
527out_list:
528 if (back_to_list)
529 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
530 else
531 kfree(pqp);
532out_mutex:
533 mutex_unlock(&priv->mcg_table.mutex);
534 return err;
535}
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
/*
 * Look up @gid in the MGM hash table and its AMGM chain.
 *
 * On return:
 *  - *hash  holds the GID's hash (the head of its chain);
 *  - *index holds the MGM index of the matching entry, or of the empty
 *    slot at the hash head, or -1 when the chain ends without a match;
 *  - *prev  holds the index of the chain entry preceding *index
 *    (-1 at the chain head);
 *  - @mgm_mailbox contains the last entry read.
 *
 * Returns 0 on success or a negative errno from the firmware commands.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      enum mlx4_steer_type steer,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      u16 *hash, int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	/* op_mod selects the Ethernet-steering hash variant when supported */
	u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	/* debug output intentionally compiled out */
	if (0)
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);

	*index = *hash;
	*prev = -1;

	/* walk the chain starting at the hash slot */
	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			/* an empty slot is only legal at the chain head */
			if (*index != *hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		/* match on GID and on protocol (top 2 bits of members_count) */
		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
606
607int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
608 int block_mcast_loopback, enum mlx4_protocol prot,
609 enum mlx4_steer_type steer)
610{
611 struct mlx4_priv *priv = mlx4_priv(dev);
612 struct mlx4_cmd_mailbox *mailbox;
613 struct mlx4_mgm *mgm;
614 u32 members_count;
615 u16 hash;
616 int index, prev;
617 int link = 0;
618 int i;
619 int err;
620 u8 port = gid[5];
621 u8 new_entry = 0;
622
623 mailbox = mlx4_alloc_cmd_mailbox(dev);
624 if (IS_ERR(mailbox))
625 return PTR_ERR(mailbox);
626 mgm = mailbox->buf;
627
628 mutex_lock(&priv->mcg_table.mutex);
629 err = find_entry(dev, port, gid, prot, steer,
630 mailbox, &hash, &prev, &index);
631 if (err)
632 goto out;
633
634 if (index != -1) {
635 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
636 new_entry = 1;
637 memcpy(mgm->gid, gid, 16);
638 }
639 } else {
640 link = 1;
641
642 index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
643 if (index == -1) {
644 mlx4_err(dev, "No AMGM entries left\n");
645 err = -ENOMEM;
646 goto out;
647 }
648 index += dev->caps.num_mgms;
649
650 memset(mgm, 0, sizeof *mgm);
651 memcpy(mgm->gid, gid, 16);
652 }
653
654 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
655 if (members_count == MLX4_QP_PER_MGM) {
656 mlx4_err(dev, "MGM at index %x is full.\n", index);
657 err = -ENOMEM;
658 goto out;
659 }
660
661 for (i = 0; i < members_count; ++i)
662 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
663 mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
664 err = 0;
665 goto out;
666 }
667
668 if (block_mcast_loopback)
669 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
670 (1U << MGM_BLCK_LB_BIT));
671 else
672 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
673
674 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
675
676 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
677 if (err)
678 goto out;
679
680 if (!link)
681 goto out;
682
683 err = mlx4_READ_ENTRY(dev, prev, mailbox);
684 if (err)
685 goto out;
686
687 mgm->next_gid_index = cpu_to_be32(index << 6);
688
689 err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
690 if (err)
691 goto out;
692
693out:
694 if (prot == MLX4_PROT_ETH) {
695
696 if (new_entry)
697 new_steering_entry(dev, 0, port, steer, index, qp->qpn);
698 else
699 existing_steering_entry(dev, 0, port, steer,
700 index, qp->qpn);
701 }
702 if (err && link && index != -1) {
703 if (index < dev->caps.num_mgms)
704 mlx4_warn(dev, "Got AMGM index %d < %d",
705 index, dev->caps.num_mgms);
706 else
707 mlx4_bitmap_free(&priv->mcg_table.bitmap,
708 index - dev->caps.num_mgms);
709 }
710 mutex_unlock(&priv->mcg_table.mutex);
711
712 mlx4_free_cmd_mailbox(dev, mailbox);
713 return err;
714}
715
/*
 * Detach @qp from the multicast group @gid: remove it from the MGM
 * entry's member list, and when the entry becomes empty (or holds only
 * promiscuous QPs) invalidate it and unlink it from the AMGM chain.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	u16 hash;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];	/* byte 5 of the GID carries the port */
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot, steer,
			 mailbox, &hash, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this QP is only a tracked duplicate of a promiscuous QP,
	 * dropping the duplicate record is all that is required */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* copy the last QP of the entry over the removed slot
	 * (i equals the old members_count after the loop above) */
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		/* members remain (or entry must stay): just rewrite it */
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* entry is now empty: invalidate it, keeping the protocol bits */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* entry sits at the hash head: pull the first AMGM chain
		 * entry (if any) into the slot, else zero the GID */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			/* free the absorbed AMGM slot back to the bitmap */
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* entry is inside the AMGM chain: link prev past it */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
828
829
830int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
831 int block_mcast_loopback, enum mlx4_protocol prot)
832{
833 enum mlx4_steer_type steer;
834
835 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
836
837 if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
838 return 0;
839
840 if (prot == MLX4_PROT_ETH)
841 gid[7] |= (steer << 1);
842
843 return mlx4_qp_attach_common(dev, qp, gid,
844 block_mcast_loopback, prot,
845 steer);
846}
847EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
848
849int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
850 enum mlx4_protocol prot)
851{
852 enum mlx4_steer_type steer;
853
854 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
855
856 if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
857 return 0;
858
859 if (prot == MLX4_PROT_ETH) {
860 gid[7] |= (steer << 1);
861 }
862
863 return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
864}
865EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
866
867
/*
 * Enable multicast promiscuous mode for @qpn on @port.
 * No-op when the device lacks VEP multicast steering support.
 */
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (!dev->caps.vep_mc_steering)
		return 0;

	return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
877
/*
 * Disable multicast promiscuous mode for @qpn on @port.
 * No-op when the device lacks VEP multicast steering support.
 */
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (!dev->caps.vep_mc_steering)
		return 0;

	return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
887
/*
 * Enable unicast promiscuous mode for @qpn on @port.
 * No-op when the device lacks VEP multicast steering support.
 */
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (!dev->caps.vep_mc_steering)
		return 0;

	return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
897
/*
 * Disable unicast promiscuous mode for @qpn on @port.
 * No-op when the device lacks VEP multicast steering support.
 */
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (!dev->caps.vep_mc_steering)
		return 0;

	return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
906
907int mlx4_init_mcg_table(struct mlx4_dev *dev)
908{
909 struct mlx4_priv *priv = mlx4_priv(dev);
910 int err;
911
912 err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
913 dev->caps.num_amgms - 1, 0, 0);
914 if (err)
915 return err;
916
917 mutex_init(&priv->mcg_table.mutex);
918
919 return 0;
920}
921
/* Tear down the MCG table: release the AMGM bitmap. */
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}
926