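/*
 * CM MAD paravirtualization for mlx4 SR-IOV: the PF intercepts CM MADs
 * sent by or destined to VFs (slaves), replacing each slave-local
 * communication ID with a unique paravirtualized ID on the way out and
 * restoring it (and resolving the owning slave) on the way in.
 */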
#include <rdma/ib_mad.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <rdma/ib_cm.h>

#include "mlx4_ib.h"

#define CM_CLEANUP_CACHE_TIMEOUT  (30 * HZ)

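/*
 * Cached mapping between a slave-local CM ID (sl_cm_id) and the
 * paravirtualized CM ID (pv_cm_id) that actually goes on the wire.
 * Entries are indexed both by (sl_cm_id, slave_id) in an rb-tree and by
 * pv_cm_id in an xarray.
 */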
struct id_map_entry {
	struct rb_node node;

	u32 sl_cm_id;
	u32 pv_cm_id;
	int slave_id;
	int scheduled_delete;
	struct mlx4_ib_dev *dev;

	struct list_head list;
	struct delayed_work timeout;
};

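/*
 * Remembers which slave a remote CM REQ was routed to, keyed by the
 * requester's communication ID, so that a later REJ with reason
 * "timeout" can still be demultiplexed to that slave.
 */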
struct rej_tmout_entry {
	int slave;
	u32 rem_pv_cm_id;
	struct delayed_work timeout;
	struct xarray *xa_rej_tmout;
};

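/*
 * Minimal overlays of the CM MAD payloads: only the fields this file
 * needs to read or rewrite are laid out explicitly; the rest is padding.
 */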
struct cm_generic_msg {
	struct ib_mad_hdr hdr;

	__be32 local_comm_id;
	__be32 remote_comm_id;
	unsigned char unused[2];
	__be16 rej_reason;
};

struct cm_sidr_generic_msg {
	struct ib_mad_hdr hdr;
	__be32 request_id;
};

struct cm_req_msg {
	unsigned char unused[0x60];
	union ib_gid primary_path_sgid;
};


static void set_local_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to set local_comm_id in SIDR_REP\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->local_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_local_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		pr_err("trying to get local_comm_id in SIDR_REP\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->local_comm_id);
	}
}

static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		msg->request_id = cpu_to_be32(cm_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
		return;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		msg->remote_comm_id = cpu_to_be32(cm_id);
	}
}

static u32 get_remote_comm_id(struct ib_mad *mad)
{
	if (mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		struct cm_sidr_generic_msg *msg =
			(struct cm_sidr_generic_msg *)mad;
		return be32_to_cpu(msg->request_id);
	} else if (mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		pr_err("trying to get remote_comm_id in SIDR_REQ\n");
		return -1;
	} else {
		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
		return be32_to_cpu(msg->remote_comm_id);
	}
}

static union ib_gid gid_from_req_msg(struct ib_device *ibdev, struct ib_mad *mad)
{
	struct cm_req_msg *msg = (struct cm_req_msg *)mad;

	return msg->primary_path_sgid;
}


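/*
 * Look up a cached mapping by (sl_cm_id, slave_id).  Called with
 * sriov->id_map_lock held.
 */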
static struct id_map_entry *
id_map_find_by_sl_id(struct ib_device *ibdev, u32 slave_id, u32 sl_cm_id)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node *node = sl_id_map->rb_node;

	while (node) {
		struct id_map_entry *id_map_entry =
			rb_entry(node, struct id_map_entry, node);

		if (id_map_entry->sl_cm_id > sl_cm_id)
			node = node->rb_left;
		else if (id_map_entry->sl_cm_id < sl_cm_id)
			node = node->rb_right;
		else if (id_map_entry->slave_id > slave_id)
			node = node->rb_left;
		else if (id_map_entry->slave_id < slave_id)
			node = node->rb_right;
		else
			return id_map_entry;
	}
	return NULL;
}

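/*
 * Delayed-work handler: once the cleanup timeout expires, drop the
 * mapping from both the pv_cm_id xarray and the sl_id rb-tree and free it.
 */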
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;

	spin_lock(&sriov->id_map_lock);
	if (!xa_erase(&sriov->pv_id_table, ent->pv_cm_id))
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}

static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
{
	struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
	struct rb_node **link = &sl_id_map->rb_node, *parent = NULL;
	struct id_map_entry *ent;
	int slave_id = new->slave_id;
	int sl_cm_id = new->sl_cm_id;

	ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
	if (ent) {
		pr_debug("overriding existing sl_id_map entry (cm_id = %x)\n",
			 sl_cm_id);

		rb_replace_node(&ent->node, &new->node, sl_id_map);
		return;
	}

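	/* Go to the bottom of the tree */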
	while (*link) {
		parent = *link;
		ent = rb_entry(parent, struct id_map_entry, node);

		if (ent->sl_cm_id > sl_cm_id || (ent->sl_cm_id == sl_cm_id && ent->slave_id > slave_id))
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, sl_id_map);
}

static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
	int ret;
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	ent = kmalloc(sizeof (struct id_map_entry), GFP_KERNEL);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->sl_cm_id = sl_cm_id;
	ent->slave_id = slave_id;
	ent->scheduled_delete = 0;
	ent->dev = to_mdev(ibdev);
	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);

	ret = xa_alloc_cyclic(&sriov->pv_id_table, &ent->pv_cm_id, ent,
			xa_limit_32b, &sriov->pv_id_next, GFP_KERNEL);
	if (ret >= 0) {
		spin_lock(&sriov->id_map_lock);
		sl_id_map_add(ibdev, ent);
		list_add_tail(&ent->list, &sriov->cm_list);
		spin_unlock(&sriov->id_map_lock);
		return ent;
	}

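	/* Allocation failed; can't store this id in both maps */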
	kfree(ent);
	mlx4_ib_warn(ibdev, "Allocation failed (err:0x%x)\n", ret);
	return ERR_PTR(-ENOMEM);
}

static struct id_map_entry *
id_map_get(struct ib_device *ibdev, int *pv_cm_id, int slave_id, int sl_cm_id)
{
	struct id_map_entry *ent;
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;

	spin_lock(&sriov->id_map_lock);
	if (*pv_cm_id == -1) {
		ent = id_map_find_by_sl_id(ibdev, slave_id, sl_cm_id);
		if (ent)
			*pv_cm_id = (int) ent->pv_cm_id;
	} else
		ent = xa_load(&sriov->pv_id_table, *pv_cm_id);
	spin_unlock(&sriov->id_map_lock);

	return ent;
}

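/*
 * Arm (or extend) the cleanup timer for a mapping once the connection is
 * being torn down, unless the device is going down.
 */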
static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	unsigned long flags;

	spin_lock(&sriov->id_map_lock);
	spin_lock_irqsave(&sriov->going_down_lock, flags);

	if (!sriov->is_going_down && !id->scheduled_delete) {
		id->scheduled_delete = 1;
		schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	} else if (id->scheduled_delete) {
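		/* Adjust the timeout if a delete is already scheduled */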
		mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
	}
	spin_unlock_irqrestore(&sriov->going_down_lock, flags);
	spin_unlock(&sriov->id_map_lock);
}

#define REJ_REASON(m) be16_to_cpu(((struct cm_generic_msg *)(m))->rej_reason)
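/*
 * Rewrite a CM MAD on its way from a slave to the wire: replace the
 * slave-local communication ID with a paravirtualized one, allocating a
 * new mapping when needed (REQ, REP, MRA, SIDR_REQ, or a REJ with reason
 * timeout).
 */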
int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad)
{
	struct id_map_entry *id;
	u32 sl_cm_id;
	int pv_cm_id = -1;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_MRA_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID ||
	    (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID && REJ_REASON(mad) == IB_CM_REJ_TIMEOUT)) {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
		if (id)
			goto cont;
		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
		if (IS_ERR(id)) {
			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
				     __func__, slave_id, sl_cm_id);
			return PTR_ERR(id);
		}
	} else if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID ||
		   mad->mad_hdr.attr_id == CM_SIDR_REP_ATTR_ID) {
		return 0;
	} else {
		sl_cm_id = get_local_comm_id(mad);
		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
	}

	if (!id) {
		pr_debug("id{slave: %d, sl_cm_id: 0x%x} is NULL! attr_id: 0x%x\n",
			 slave_id, sl_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -EINVAL;
	}

cont:
	set_local_comm_id(mad, id->pv_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
		schedule_delayed(ibdev, id);
	return 0;
}

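/*
 * Delayed-work handler: remove an expired rej_tmout entry from its xarray
 * and free it.
 */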
static void rej_tmout_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
	struct rej_tmout_entry *deleted;

	deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);

	if (deleted != item)
		pr_debug("deleted(%p) != item(%p)\n", deleted, item);

	kfree(item);
}

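/*
 * Record which slave a remote CM REQ was forwarded to, keyed by the
 * requester's communication ID.  If an entry already exists, only its
 * timeout is refreshed.
 */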
static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
{
	struct rej_tmout_entry *item;
	struct rej_tmout_entry *old;
	int ret = 0;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (item) {
		if (xa_err(item))
			ret = xa_err(item);
		else
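			/* If a retry, adjust the delayed work */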
			mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
		goto err_or_exists;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;

	INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
	item->slave = slave;
	item->rem_pv_cm_id = rem_pv_cm_id;
	item->xa_rej_tmout = &sriov->xa_rej_tmout;

	old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
	if (old) {
		pr_debug(
			"Non-null old entry (%p) or error (%d) when inserting\n",
			old, xa_err(old));
		kfree(item);
		return xa_err(old);
	}

	schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);

	return 0;

err_or_exists:
	xa_unlock(&sriov->xa_rej_tmout);
	return ret;
}

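/*
 * Find the slave a remote communication ID was previously routed to, or a
 * negative errno if no entry exists.
 */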
static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
{
	struct rej_tmout_entry *item;
	int slave;

	xa_lock(&sriov->xa_rej_tmout);
	item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);

	if (!item || xa_err(item)) {
		pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
			 rem_pv_cm_id, xa_err(item));
		slave = !item ? -ENOENT : xa_err(item);
	} else {
		slave = item->slave;
	}
	xa_unlock(&sriov->xa_rej_tmout);

	return slave;
}

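/*
 * Demultiplex a CM MAD arriving from the wire: resolve which slave it is
 * destined for (for a new REQ/SIDR_REQ, by matching the request's primary
 * path SGID) and restore that slave's local communication ID.
 */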
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad)
{
	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
	u32 rem_pv_cm_id = get_local_comm_id(mad);
	u32 pv_cm_id;
	struct id_map_entry *id;
	int sts;

	if (mad->mad_hdr.attr_id == CM_REQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
		union ib_gid gid;

		if (!slave)
			return 0;

		gid = gid_from_req_msg(ibdev, mad);
		*slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
		if (*slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
				     be64_to_cpu(gid.global.interface_id));
			return -ENOENT;
		}

		sts = alloc_rej_tmout(sriov, rem_pv_cm_id, *slave);
		if (sts)
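			/* Even if this fails, we pass on the REQ to the slave */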
			pr_debug("Could not allocate rej_tmout entry. rem_pv_cm_id 0x%x slave %d status %d\n",
				 rem_pv_cm_id, *slave, sts);

		return 0;
	}

	pv_cm_id = get_remote_comm_id(mad);
	id = id_map_get(ibdev, (int *)&pv_cm_id, -1, -1);

	if (!id) {
		if (mad->mad_hdr.attr_id == CM_REJ_ATTR_ID &&
		    REJ_REASON(mad) == IB_CM_REJ_TIMEOUT && slave) {
			*slave = lookup_rej_tmout_slave(sriov, rem_pv_cm_id);

			return (*slave < 0) ? *slave : 0;
		}
		pr_debug("Couldn't find an entry for pv_cm_id 0x%x, attr_id 0x%x\n",
			 pv_cm_id, be16_to_cpu(mad->mad_hdr.attr_id));
		return -ENOENT;
	}

	if (slave)
		*slave = id->slave_id;
	set_remote_comm_id(mad, id->sl_cm_id);

	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID ||
	    mad->mad_hdr.attr_id == CM_REJ_ATTR_ID)
		schedule_delayed(ibdev, id);

	return 0;
}

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
{
	spin_lock_init(&dev->sriov.id_map_lock);
	INIT_LIST_HEAD(&dev->sriov.cm_list);
	dev->sriov.sl_id_map = RB_ROOT;
	xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
	xa_init(&dev->sriov.xa_rej_tmout);
}

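/*
 * Flush and free rej_tmout entries for one slave, or for all slaves when
 * slave is negative.
 */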
static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
{
	struct rej_tmout_entry *item;
	bool flush_needed = false;
	unsigned long id;
	int cnt = 0;

	xa_lock(&sriov->xa_rej_tmout);
	xa_for_each(&sriov->xa_rej_tmout, id, item) {
		if (slave < 0 || slave == item->slave) {
			mod_delayed_work(system_wq, &item->timeout, 0);
			flush_needed = true;
			++cnt;
		}
	}
	xa_unlock(&sriov->xa_rej_tmout);

	if (flush_needed) {
		flush_scheduled_work();
		pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
			 cnt, slave);
	}

	if (slave < 0)
		WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
}

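/* slave = -1 ==> clean up mappings for all slaves */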
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
{
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	struct list_head lh;
	struct rb_node *nd;
	int need_flush = 0;
	struct id_map_entry *map, *tmp_map;

	INIT_LIST_HEAD(&lh);
	spin_lock(&sriov->id_map_lock);
	list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
		if (slave < 0 || slave == map->slave_id) {
			if (map->scheduled_delete)
				need_flush |= !cancel_delayed_work(&map->timeout);
		}
	}

	spin_unlock(&sriov->id_map_lock);

	if (need_flush)
		flush_scheduled_work();

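	/* Now remove all leftover entries from the databases */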
	spin_lock(&sriov->id_map_lock);
	if (slave < 0) {
		while (rb_first(sl_id_map)) {
			struct id_map_entry *ent =
				rb_entry(rb_first(sl_id_map),
					 struct id_map_entry, node);

			rb_erase(&ent->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, ent->pv_cm_id);
		}
		list_splice_init(&dev->sriov.cm_list, &lh);
	} else {
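		/* First, move nodes belonging to this slave to the removal list */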
		nd = rb_first(sl_id_map);
		while (nd) {
			struct id_map_entry *ent =
				rb_entry(nd, struct id_map_entry, node);
			nd = rb_next(nd);
			if (ent->slave_id == slave)
				list_move_tail(&ent->list, &lh);
		}

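		/* Remove those nodes from the databases */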
		list_for_each_entry_safe(map, tmp_map, &lh, list) {
			rb_erase(&map->node, sl_id_map);
			xa_erase(&sriov->pv_id_table, map->pv_cm_id);
		}

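		/* Add any remaining entries for this slave from cm_list */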
		list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
			if (slave == map->slave_id)
				list_move_tail(&map->list, &lh);
		}
	}

	spin_unlock(&sriov->id_map_lock);

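	/* Free any map entries left behind due to cancel_delayed_work above */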
	list_for_each_entry_safe(map, tmp_map, &lh, list) {
		list_del(&map->list);
		kfree(map);
	}

	rej_tmout_xa_cleanup(sriov, slave);
}