// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2019, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP] = "no QP",
	[IB_CM_REJ_NO_EEC] = "no EEC",
	[IB_CM_REJ_NO_RESOURCES] = "no resources",
	[IB_CM_REJ_TIMEOUT] = "timeout",
	[IB_CM_REJ_UNSUPPORTED] = "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
	[IB_CM_REJ_STALE_CONN] = "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
	[IB_CM_REJ_INVALID_GID] = "invalid GID",
	[IB_CM_REJ_INVALID_LID] = "invalid LID",
	[IB_CM_REJ_INVALID_SL] = "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT] = "port redirect",
	[IB_CM_REJ_INVALID_MTU] = "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

struct cm_id_private;
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len);
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct xarray local_id_table;
	u32 local_id_next;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	/* Sync on cm change port state */
	spinlock_t state_lock;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct rdma_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[];
};

struct cm_timewait_info {
	struct cm_work work;
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	refcount_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock.
	 */
	int listen_sharecount;
	struct rcu_head rcu;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port mad is registered and av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

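/*
 * Allocate a MAD send buffer for this cm_id, using the primary AV when its
 * port is ready and falling back to the alternate AV otherwise. Takes a
 * reference on the cm_id that is dropped by cm_free_msg().
 */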
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* Don't let the port be released until the agent is down. */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the mad agent yet. */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       av->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah, 0);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	refcount_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}

static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
							   struct ib_mad_recv_wc *mad_recv_wc)
{
	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				  GFP_ATOMIC,
				  IB_MGMT_BASE_VERSION);
}

static int cm_create_response_msg_ah(struct cm_port *port,
				     struct ib_mad_recv_wc *mad_recv_wc,
				     struct ib_mad_send_buf *msg)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	msg->ah = ah;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	int ret;

	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
	if (IS_ERR(m))
		return PTR_ERR(m);

	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
	if (ret) {
		cm_free_msg(m);
		return ret;
	}

	*msg = m;
	return 0;
}

static void *cm_copy_private_data(const void *private_data,
				  u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

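/* Attach new private data to the cm_id, freeing any previous copy. */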
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
			      struct ib_grh *grh, struct cm_av *av)
{
	struct rdma_ah_attr new_ah_attr;
	int ret;

	av->port = port;
	av->pkey_index = wc->pkey_index;

	/*
	 * av->ah_attr might be initialized based on a past wc during an
	 * incoming connect request or while sending out a connect request.
	 * So initialize a new ah_attr on the stack. If initialization fails,
	 * the old ah_attr is used for sending any responses. If
	 * initialization succeeds, the new ah_attr overwrites the old one.
	 */
	ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				      port->port_num, wc,
				      grh, &new_ah_attr);
	if (ret)
		return ret;

	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}

static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
				  struct cm_av *av,
				  struct cm_port *port)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm.lock, flags);

	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		ret = -EINVAL;

	spin_unlock_irqrestore(&cm.lock, flags);
	return ret;
}

static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;

	if (attr) {
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			if (cm_dev->ib_device == attr->device) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
	} else {
		/* The SGID attribute can be NULL in the following
		 * conditions:
		 * (a) alternative path
		 * (b) IB link layer without GRH
		 * (c) LAP send messages
		 */
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			attr = rdma_find_gid(cm_dev->ib_device,
					     &path->sgid,
					     sa_conv_pathrec_to_gid_type(path),
					     NULL);
			if (!IS_ERR(attr)) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
		if (port)
			rdma_put_gid_attr(attr);
	}
	return port;
}

static int cm_init_av_by_path(struct sa_path_rec *path,
			      const struct ib_gid_attr *sgid_attr,
			      struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct rdma_ah_attr new_ah_attr;
	struct cm_device *cm_dev;
	struct cm_port *port;
	int ret;

	port = get_cm_port_from_path(path, sgid_attr);
	if (!port)
		return -EINVAL;
	cm_dev = port->cm_dev;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;

	/*
	 * av->ah_attr might be initialized based on a wc or during request
	 * processing time, which might hold a reference to the sgid_attr.
	 * So initialize a new ah_attr on the stack. If initialization fails,
	 * the old ah_attr is used for sending any responses. If
	 * initialization succeeds, the new ah_attr overwrites the old one,
	 * so that the right ah_attr can be used to return an error response.
	 */
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&new_ah_attr, sgid_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;

	ret = add_cm_id_to_port_list(cm_id_priv, av, port);
	if (ret) {
		rdma_destroy_ah_attr(&new_ah_attr);
		return ret;
	}
	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

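/*
 * Consumer-visible local IDs are the xarray index XOR'd with
 * cm.random_id_operand; cm_local_id() recovers the xarray index from a
 * local_id.
 */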
static u32 cm_local_id(__be32 local_id)
{
	return (__force u32) (local_id ^ cm.random_id_operand);
}

static void cm_free_id(__be32 local_id)
{
	xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
}

static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	rcu_read_lock();
	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
	    !refcount_inc_not_zero(&cm_id_priv->refcount))
		cm_id_priv = NULL;
	rcu_read_unlock();

	return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

/*
 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
 * if the new ID was inserted, NULL if it could not be inserted due to a
 * collision, or the existing cm_id_priv ready for shared usage.
 */
static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
					      ib_cm_handler shared_handler)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
			/*
			 * Sharing an ib_cm_id with different handlers is not
			 * supported.
			 */
			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
			    cur_cm_id_priv->id.context ||
			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
				spin_unlock_irqrestore(&cm.lock, flags);
				return NULL;
			}
			refcount_inc(&cur_cm_id_priv->refcount);
			cur_cm_id_priv->listen_sharecount++;
			spin_unlock_irqrestore(&cm.lock, flags);
			return cur_cm_id_priv;
		}

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	cm_id_priv->listen_sharecount++;
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return cm_id_priv;
}

static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info *
cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info *cm_find_remote_id(__be64 remote_ca_guid,
						  __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info *
cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private *
cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;

			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof(*port_gid));
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
					      ib_cm_handler cm_handler,
					      void *context)
{
	struct cm_id_private *cm_id_priv;
	u32 id;
	int ret;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;

	RB_CLEAR_NODE(&cm_id_priv->service_node);
	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	refcount_set(&cm_id_priv->refcount, 1);

	ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
				  &cm.local_id_next, GFP_KERNEL);
	if (ret < 0)
		goto error;
	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;

	return cm_id_priv;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}

/*
 * Make the ID visible to the MAD handlers and other threads that use the
 * xarray.
 */
static void cm_finalize_id(struct cm_id_private *cm_id_priv)
{
	xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
		     cm_id_priv, GFP_KERNEL);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	cm_finalize_id(cm_id_priv);
	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}

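/*
 * Drop the timewait entry from the remote ID and remote QPN rbtrees if it
 * was inserted there. Callers hold cm.lock.
 */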
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof(*timewait_info), GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irq(&cm_id_priv->lock);
retest:
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_lock(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
			spin_unlock(&cm.lock);
			spin_unlock_irq(&cm_id_priv->lock);
			cm_deref_id(cm_id_priv);
			return;
		}
		cm_id->state = IB_CM_IDLE;
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		RB_CLEAR_NODE(&cm_id_priv->service_node);
		spin_unlock(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		cm_send_sidr_rep_locked(cm_id_priv,
					&(struct ib_cm_sidr_rep_param){
						.status = IB_SIDR_REJECT });
		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
				   &cm_id_priv->id.device->node_guid,
				   sizeof(cm_id_priv->id.device->node_guid),
				   NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
		} else {
			cm_send_rej_locked(cm_id_priv,
					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
					   NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			cm_id->state = IB_CM_IDLE;
			break;
		}
		cm_send_dreq_locked(cm_id_priv, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		goto retest;
	case IB_CM_DREQ_RCVD:
		cm_send_drep_locked(cm_id_priv, NULL, 0);
		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
		goto retest;
	case IB_CM_TIMEWAIT:
		/*
		 * The cm_acquire_id in cm_timewait_handler will stop working
		 * once we do cm_free_id() below, so just move to idle here for
		 * consistency.
		 */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_IDLE:
		break;
	}
	WARN_ON(cm_id->state != IB_CM_IDLE);

	spin_lock(&cm.lock);
	/* Required for cleanup paths related to cm_req_handler() */
	if (cm_id_priv->timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	WARN_ON(cm_id_priv->listen_sharecount);
	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock(&cm.lock);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);

	rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
	rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
	kfree(cm_id_priv->private_data);
	kfree_rcu(cm_id_priv, rcu);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

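/*
 * Validate and record the service ID/mask for a listen. A zero mask means
 * an exact match, and IB_CM_ASSIGN_SERVICE_ID asks the CM to assign an
 * unused service ID.
 */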
static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
			  __be64 service_mask)
{
	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id_priv->id.service_mask = ~cpu_to_be64(0);
	} else {
		cm_id_priv->id.service_id = service_id;
		cm_id_priv->id.service_mask = service_mask;
	}
	return 0;
}

/**
 * ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to the service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.
 */
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_listen(cm_id_priv, service_id, service_mask);
	if (ret)
		goto out;

	if (!cm_insert_listen(cm_id_priv, NULL)) {
		ret = -EBUSY;
		goto out;
	}

	cm_id_priv->id.state = IB_CM_LISTEN;
	ret = 0;

out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *			 the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   network-byte order.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *listen_id_priv;
	struct cm_id_private *cm_id_priv;
	int err = 0;

	/* Sharing an ib_cm_id with different handlers is not supported */
	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	err = cm_init_listen(cm_id_priv, service_id, 0);
	if (err)
		return ERR_PTR(err);

	spin_lock_irq(&cm_id_priv->lock);
	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
	if (listen_id_priv != cm_id_priv) {
		spin_unlock_irq(&cm_id_priv->lock);
		ib_destroy_cm_id(&cm_id_priv->id);
		if (!listen_id_priv)
			return ERR_PTR(-EINVAL);
		return &listen_id_priv->id;
	}
	cm_id_priv->id.state = IB_CM_LISTEN;
	spin_unlock_irq(&cm_id_priv->lock);

	/*
	 * A listening ID does not need to be in the xarray since it does not
	 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
	 * and does not enter timewait.
	 */

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

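/*
 * Transaction IDs for outgoing MADs combine the MAD agent's hi_tid in the
 * upper 32 bits with the cm_id's local_id in the lower 32 bits.
 */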
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64)cm_id_priv->id.local_id;
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct sa_path_rec *pri_path = param->primary_path;
	struct sa_path_rec *alt_path = param->alternate_path;
	bool pri_ext = false;

	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
					      pri_path->opa.slid);

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));

	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
		be64_to_cpu(cm_id_priv->id.device->node_guid));
	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
		param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
		param->local_cm_response_timeout);
	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
		be16_to_cpu(param->primary_path->pkey));
	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
		param->primary_path->mtu);
	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
			param->responder_resources);
		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
			param->rnr_retry_count);
		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
	}

	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
		pri_path->sgid;
	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
		pri_path->dgid;
	if (pri_ext) {
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
	}
	if (pri_path->hop_limit <= 1) {
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
					      htons(ntohl(sa_path_get_slid(
						      pri_path)))));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
					      htons(ntohl(sa_path_get_dlid(
						      pri_path)))));
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
	}
	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
		be32_to_cpu(pri_path->flow_label));
	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
		(pri_path->hop_limit <= 1));
	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		bool alt_ext = false;

		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
						      alt_path->opa.slid);

		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
			alt_path->sgid;
		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
			alt_path->dgid;
		if (alt_ext) {
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
		}
		if (alt_path->hop_limit <= 1) {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
						  htons(ntohl(sa_path_get_slid(
							  alt_path)))));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
						  htons(ntohl(sa_path_get_dlid(
							  alt_path)))));
		} else {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
		}
		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
			be32_to_cpu(alt_path->flow_label));
		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
			alt_path->traffic_class);
		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
			alt_path->hop_limit);
		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
			(alt_path->hop_limit <= 1));
		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
			    param->private_data_len);
}

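/*
 * A REQ may only be sent for an RC, UC, or XRC INI QP and must carry a
 * primary path; an alternate path, if supplied, must match the primary
 * path's pkey and MTU.
 */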
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path,
				 param->ppath_sgid_attr, &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto out;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path, NULL,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret)
			goto out;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto out;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
	IBA_SET(CM_REJ_REASON, rej_msg, reason);

	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

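/*
 * A REQ carries an alternate path if its alternate local LID is non-zero
 * or its alternate local GID is an OPA GID.
 */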
static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
	return ((cpu_to_be16(
			IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
		(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					       req_msg))));
}

static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
				 struct sa_path_rec *path, union ib_gid *gid)
{
	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
		path->rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->rec_type = SA_PATH_REC_TYPE_IB;
}

static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
					struct sa_path_rec *primary_path,
					struct sa_path_rec *alt_path)
{
	u32 lid;

	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(primary_path,
				 IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
					 req_msg));
		sa_path_set_slid(primary_path,
				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(primary_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(primary_path, lid);
	}

	if (!cm_req_has_alt_path(req_msg))
		return;

	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
					 req_msg));
		sa_path_set_slid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(alt_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(alt_path, lid);
	}
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct sa_path_rec *primary_path,
				     struct sa_path_rec *alt_path)
{
	primary_path->dgid =
		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
	primary_path->sgid =
		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
	primary_path->flow_label =
		cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
	primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
	primary_path->traffic_class =
		IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
	primary_path->reversible = 1;
	primary_path->pkey =
		cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
	primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id =
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
	if (sa_path_is_roce(primary_path))
		primary_path->roce.route_resolved = false;

	if (cm_req_has_alt_path(req_msg)) {
		alt_path->dgid = *IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
		alt_path->sgid = *IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
		alt_path->flow_label = cpu_to_be32(
			IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
		alt_path->hop_limit =
			IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
		alt_path->traffic_class =
			IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
		alt_path->reversible = 1;
		alt_path->pkey =
			cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
		alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu =
			IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id =
			cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));

		if (sa_path_is_roce(alt_path))
			alt_path->roce.route_resolved = false;
	}
	cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
}

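/*
 * Return the pkey the REQ actually arrived on, looked up from the pkey
 * index in the receive work completion.
 */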
static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}

/**
 * cm_opa_to_ib_sgid - Convert an OPA SGID to an IB SGID
 * ULPs (such as IPoIB) do not understand OPA GIDs and will
 * reject them as the local_gid will not match the sgid. Therefore,
 * change the pathrec's SGID to an IB SGID.
 *
 * @work: Work completion
 * @path: Path record
 */
static void cm_opa_to_ib_sgid(struct cm_work *work,
			      struct sa_path_rec *path)
{
	struct ib_device *dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;

	if (rdma_cap_opa_ah(dev, port_num) &&
	    (ib_is_opa_gid(&path->sgid))) {
		union ib_gid sgid;

		if (rdma_query_gid(dev, port_num, 0, &sgid)) {
			dev_warn(&dev->dev,
				 "Error updating sgid in CM request\n");
			return;
		}

		path->sgid = sgid;
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	cm_opa_to_ib_sgid(work, param->primary_path);
	if (cm_req_has_alt_path(req_msg)) {
		param->alternate_path = &work->path[1];
		cm_opa_to_ib_sgid(work, param->alternate_path);
	} else {
		param->alternate_path = NULL;
	}
	param->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
	param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
	param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
	param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
	param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
	param->local_cm_response_timeout =
		IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
	param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
	param->remote_cm_response_timeout =
		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
	param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
	param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
	param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		if (!work)
			return;

		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
	IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
			    private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason, void *ari,
			  u8 ari_length, const void *private_data,
			  u8 private_data_len, enum ib_cm_state state)
{
	lockdep_assert_held(&cm_id_priv->lock);

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));

	switch (state) {
	case IB_CM_REQ_RCVD:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
			CM_MSG_RESPONSE_OTHER);
		break;
	}

	IBA_SET(CM_REJ_REASON, rej_msg, reason);
	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
			    private_data_len);
}
1875
1876static void cm_dup_req_handler(struct cm_work *work,
1877 struct cm_id_private *cm_id_priv)
1878{
1879 struct ib_mad_send_buf *msg = NULL;
1880 int ret;
1881
1882 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1883 counter[CM_REQ_COUNTER]);
1884
1885
1886 spin_lock_irq(&cm_id_priv->lock);
1887 if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
1888 spin_unlock_irq(&cm_id_priv->lock);
1889 return;
1890 }
1891 spin_unlock_irq(&cm_id_priv->lock);
1892
1893 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1894 if (ret)
1895 return;
1896
1897 spin_lock_irq(&cm_id_priv->lock);
1898 switch (cm_id_priv->id.state) {
1899 case IB_CM_MRA_REQ_SENT:
1900 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1901 CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1902 cm_id_priv->private_data,
1903 cm_id_priv->private_data_len);
1904 break;
1905 case IB_CM_TIMEWAIT:
1906 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
1907 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
1908 IB_CM_TIMEWAIT);
1909 break;
1910 default:
1911 goto unlock;
1912 }
1913 spin_unlock_irq(&cm_id_priv->lock);
1914
1915 ret = ib_post_send_mad(msg, NULL);
1916 if (ret)
1917 goto free;
1918 return;
1919
1920unlock: spin_unlock_irq(&cm_id_priv->lock);
1921free: cm_free_msg(msg);
1922}
1923
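 /*
  * Match an incoming REQ against existing connections and the listen
  * table. Duplicate REQs are answered by cm_dup_req_handler(), stale
  * connections are rejected and torn down, and a REQ with no listener is
  * rejected with IB_CM_REJ_INVALID_SERVICE_ID. On success the matching
  * listen cm_id is returned with its refcount elevated.
  */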
1924static struct cm_id_private *cm_match_req(struct cm_work *work,
1925 struct cm_id_private *cm_id_priv)
1926{
1927 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1928 struct cm_timewait_info *timewait_info;
1929 struct cm_req_msg *req_msg;
1930 struct ib_cm_id *cm_id;
1931
1932 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1933
1934
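 /* Check for a possible duplicate REQ. */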
1935 spin_lock_irq(&cm.lock);
1936 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1937 if (timewait_info) {
1938 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1939 timewait_info->work.remote_id);
1940 spin_unlock_irq(&cm.lock);
1941 if (cur_cm_id_priv) {
1942 cm_dup_req_handler(work, cur_cm_id_priv);
1943 cm_deref_id(cur_cm_id_priv);
1944 }
1945 return NULL;
1946 }
1947
1948
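 /* Check for stale connections. */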
1949 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1950 if (timewait_info) {
1951 cm_cleanup_timewait(cm_id_priv->timewait_info);
1952 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1953 timewait_info->work.remote_id);
1954
1955 spin_unlock_irq(&cm.lock);
1956 cm_issue_rej(work->port, work->mad_recv_wc,
1957 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1958 NULL, 0);
1959 if (cur_cm_id_priv) {
1960 cm_id = &cur_cm_id_priv->id;
1961 ib_send_cm_dreq(cm_id, NULL, 0);
1962 cm_deref_id(cur_cm_id_priv);
1963 }
1964 return NULL;
1965 }
1966
1967
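 /* Find a matching listen request. */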
1968 listen_cm_id_priv = cm_find_listen(
1969 cm_id_priv->id.device,
1970 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
1971 if (!listen_cm_id_priv) {
1972 cm_cleanup_timewait(cm_id_priv->timewait_info);
1973 spin_unlock_irq(&cm.lock);
1974 cm_issue_rej(work->port, work->mad_recv_wc,
1975 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1976 NULL, 0);
1977 return NULL;
1978 }
1979 refcount_inc(&listen_cm_id_priv->refcount);
1980 spin_unlock_irq(&cm.lock);
1981 return listen_cm_id_priv;
1982}
1983
1984
1985
1986
1987
1988
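 /*
  * A REQ that crossed a router may carry permissive LIDs. When the
  * subnet-local flag is clear, recover the missing LIDs and SLs from the
  * receiving work completion.
  */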
1989static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
1990{
1991 if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
1992 if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
1993 req_msg)) == IB_LID_PERMISSIVE) {
1994 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1995 be16_to_cpu(ib_lid_be16(wc->slid)));
1996 IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
1997 }
1998
1999 if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
2000 req_msg)) == IB_LID_PERMISSIVE)
2001 IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
2002 wc->dlid_path_bits);
2003 }
2004
2005 if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
2006 if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
2007 req_msg)) == IB_LID_PERMISSIVE) {
2008 IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
2009 be16_to_cpu(ib_lid_be16(wc->slid)));
2010 IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
2011 }
2012
2013 if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
2014 req_msg)) == IB_LID_PERMISSIVE)
2015 IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
2016 wc->dlid_path_bits);
2017 }
2018}
2019
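 /*
  * Handle a received REQ: allocate a new cm_id, match the REQ against the
  * listen table, resolve the primary (and optional alternate) path, and
  * hand an IB_CM_REQ_RECEIVED event to the listener's handler.
  */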
2020static int cm_req_handler(struct cm_work *work)
2021{
2022 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
2023 struct cm_req_msg *req_msg;
2024 const struct ib_global_route *grh;
2025 const struct ib_gid_attr *gid_attr;
2026 int ret;
2027
2028 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
2029
2030 cm_id_priv =
2031 cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
2032 if (IS_ERR(cm_id_priv))
2033 return PTR_ERR(cm_id_priv);
2034
2035 cm_id_priv->id.remote_id =
2036 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
2037 cm_id_priv->id.service_id =
2038 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
2039 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
2040 cm_id_priv->tid = req_msg->hdr.tid;
2041 cm_id_priv->timeout_ms = cm_convert_to_ms(
2042 IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
2043 cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
2044 cm_id_priv->remote_qpn =
2045 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
2046 cm_id_priv->initiator_depth =
2047 IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
2048 cm_id_priv->responder_resources =
2049 IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
2050 cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
2051 cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
2052 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
2053 cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
2054 cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
2055 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2056
2057 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2058 work->mad_recv_wc->recv_buf.grh,
2059 &cm_id_priv->av);
2060 if (ret)
2061 goto destroy;
2062 cm_id_priv->timewait_info =
2063 cm_create_timewait_info(cm_id_priv->id.local_id);
2064 if (IS_ERR(cm_id_priv->timewait_info)) {
2065 ret = PTR_ERR(cm_id_priv->timewait_info);
2066 goto destroy;
2067 }
2068 cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
2069 cm_id_priv->timewait_info->remote_ca_guid =
2070 cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
2071 cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
2072
2073
2074
2075
2076
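 /*
  * The new cm_id is not yet visible to other threads (cm_finalize_id()
  * has not run), so the state can be set without taking the lock.
  */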
2077 cm_id_priv->id.state = IB_CM_REQ_RCVD;
2078
2079 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
2080 if (!listen_cm_id_priv) {
2081 pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
2082 be32_to_cpu(cm_id_priv->id.local_id));
2083 cm_id_priv->id.state = IB_CM_IDLE;
2084 ret = -EINVAL;
2085 goto destroy;
2086 }
2087
2088 cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
2089
2090 memset(&work->path[0], 0, sizeof(work->path[0]));
2091 if (cm_req_has_alt_path(req_msg))
2092 memset(&work->path[1], 0, sizeof(work->path[1]));
2093 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
2094 gid_attr = grh->sgid_attr;
2095
2096 if (gid_attr &&
2097 rdma_protocol_roce(work->port->cm_dev->ib_device,
2098 work->port->port_num)) {
2099 work->path[0].rec_type =
2100 sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
2101 } else {
2102 cm_path_set_rec_type(
2103 work->port->cm_dev->ib_device, work->port->port_num,
2104 &work->path[0],
2105 IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
2106 req_msg));
2107 }
2108 if (cm_req_has_alt_path(req_msg))
2109 work->path[1].rec_type = work->path[0].rec_type;
2110 cm_format_paths_from_req(req_msg, &work->path[0],
2111 &work->path[1]);
2112 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
2113 sa_path_set_dmac(&work->path[0],
2114 cm_id_priv->av.ah_attr.roce.dmac);
2115 work->path[0].hop_limit = grh->hop_limit;
2116 ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
2117 cm_id_priv);
2118 if (ret) {
2119 int err;
2120
2121 err = rdma_query_gid(work->port->cm_dev->ib_device,
2122 work->port->port_num, 0,
2123 &work->path[0].sgid);
2124 if (err)
2125 ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2126 NULL, 0, NULL, 0);
2127 else
2128 ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2129 &work->path[0].sgid,
2130 sizeof(work->path[0].sgid),
2131 NULL, 0);
2132 goto rejected;
2133 }
2134 if (cm_req_has_alt_path(req_msg)) {
2135 ret = cm_init_av_by_path(&work->path[1], NULL,
2136 &cm_id_priv->alt_av, cm_id_priv);
2137 if (ret) {
2138 ib_send_cm_rej(&cm_id_priv->id,
2139 IB_CM_REJ_INVALID_ALT_GID,
2140 &work->path[0].sgid,
2141 sizeof(work->path[0].sgid), NULL, 0);
2142 goto rejected;
2143 }
2144 }
2145
2146 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
2147 cm_id_priv->id.context = listen_cm_id_priv->id.context;
2148 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2149
2150
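 /* Insert the new ID so MAD handlers can find it. */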
2151 spin_lock_irq(&cm_id_priv->lock);
2152 cm_finalize_id(cm_id_priv);
2153
2154
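 /* Refcount belongs to the event, pairs with cm_process_work() */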
2155 refcount_inc(&cm_id_priv->refcount);
2156 atomic_inc(&cm_id_priv->work_count);
2157 spin_unlock_irq(&cm_id_priv->lock);
2158 cm_process_work(cm_id_priv, work);
2159
2160
2161
2162
2163
2164
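 /*
  * Since this ID was just created and only became visible in
  * cm_finalize_id() above, the listen_cm_id_priv reference taken in
  * cm_match_req() is still held here.
  */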
2165 cm_deref_id(listen_cm_id_priv);
2166 return 0;
2167
2168rejected:
2169 cm_deref_id(listen_cm_id_priv);
2170destroy:
2171 ib_destroy_cm_id(&cm_id_priv->id);
2172 return ret;
2173}
2174
2175static void cm_format_rep(struct cm_rep_msg *rep_msg,
2176 struct cm_id_private *cm_id_priv,
2177 struct ib_cm_rep_param *param)
2178{
2179 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
2180 IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
2181 be32_to_cpu(cm_id_priv->id.local_id));
2182 IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
2183 be32_to_cpu(cm_id_priv->id.remote_id));
2184 IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
2185 IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
2186 param->responder_resources);
2187 IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
2188 cm_id_priv->av.port->cm_dev->ack_delay);
2189 IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
2190 IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
2191 IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
2192 be64_to_cpu(cm_id_priv->id.device->node_guid));
2193
2194 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2195 IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
2196 param->initiator_depth);
2197 IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
2198 param->flow_control);
2199 IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
2200 IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
2201 } else {
2202 IBA_SET(CM_REP_SRQ, rep_msg, 1);
2203 IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
2204 }
2205
2206 if (param->private_data && param->private_data_len)
2207 IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
2208 param->private_data_len);
2209}
2210
2211int ib_send_cm_rep(struct ib_cm_id *cm_id,
2212 struct ib_cm_rep_param *param)
2213{
2214 struct cm_id_private *cm_id_priv;
2215 struct ib_mad_send_buf *msg;
2216 struct cm_rep_msg *rep_msg;
2217 unsigned long flags;
2218 int ret;
2219
2220 if (param->private_data &&
2221 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2222 return -EINVAL;
2223
2224 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2225 spin_lock_irqsave(&cm_id_priv->lock, flags);
2226 if (cm_id->state != IB_CM_REQ_RCVD &&
2227 cm_id->state != IB_CM_MRA_REQ_SENT) {
2228 pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
2229 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2230 ret = -EINVAL;
2231 goto out;
2232 }
2233
2234 ret = cm_alloc_msg(cm_id_priv, &msg);
2235 if (ret)
2236 goto out;
2237
2238 rep_msg = (struct cm_rep_msg *) msg->mad;
2239 cm_format_rep(rep_msg, cm_id_priv, param);
2240 msg->timeout_ms = cm_id_priv->timeout_ms;
2241 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2242
2243 ret = ib_post_send_mad(msg, NULL);
2244 if (ret) {
2245 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2246 cm_free_msg(msg);
2247 return ret;
2248 }
2249
2250 cm_id->state = IB_CM_REP_SENT;
2251 cm_id_priv->msg = msg;
2252 cm_id_priv->initiator_depth = param->initiator_depth;
2253 cm_id_priv->responder_resources = param->responder_resources;
2254 cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2255 WARN_ONCE(param->qp_num & 0xFF000000,
2256 "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
2257 param->qp_num);
2258 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2259
2260out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2261 return ret;
2262}
2263EXPORT_SYMBOL(ib_send_cm_rep);
2264
2265static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2266 struct cm_id_private *cm_id_priv,
2267 const void *private_data,
2268 u8 private_data_len)
2269{
2270 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2271 IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
2272 be32_to_cpu(cm_id_priv->id.local_id));
2273 IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
2274 be32_to_cpu(cm_id_priv->id.remote_id));
2275
2276 if (private_data && private_data_len)
2277 IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
2278 private_data_len);
2279}
2280
2281int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2282 const void *private_data,
2283 u8 private_data_len)
2284{
2285 struct cm_id_private *cm_id_priv;
2286 struct ib_mad_send_buf *msg;
2287 unsigned long flags;
2288 void *data;
2289 int ret;
2290
2291 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2292 return -EINVAL;
2293
2294 data = cm_copy_private_data(private_data, private_data_len);
2295 if (IS_ERR(data))
2296 return PTR_ERR(data);
2297
2298 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2299 spin_lock_irqsave(&cm_id_priv->lock, flags);
2300 if (cm_id->state != IB_CM_REP_RCVD &&
2301 cm_id->state != IB_CM_MRA_REP_SENT) {
2302 pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
2303 be32_to_cpu(cm_id->local_id), cm_id->state);
2304 ret = -EINVAL;
2305 goto error;
2306 }
2307
2308 ret = cm_alloc_msg(cm_id_priv, &msg);
2309 if (ret)
2310 goto error;
2311
2312 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2313 private_data, private_data_len);
2314
2315 ret = ib_post_send_mad(msg, NULL);
2316 if (ret) {
2317 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2318 cm_free_msg(msg);
2319 kfree(data);
2320 return ret;
2321 }
2322
2323 cm_id->state = IB_CM_ESTABLISHED;
2324 cm_set_private_data(cm_id_priv, data, private_data_len);
2325 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2326 return 0;
2327
2328error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2329 kfree(data);
2330 return ret;
2331}
2332EXPORT_SYMBOL(ib_send_cm_rtu);
2333
2334static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2335{
2336 struct cm_rep_msg *rep_msg;
2337 struct ib_cm_rep_event_param *param;
2338
2339 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2340 param = &work->cm_event.param.rep_rcvd;
2341 param->remote_ca_guid =
2342 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2343 param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
2344 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2345 param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
2346 param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2347 param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2348 param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2349 param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
2350 param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
2351 param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2352 param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
2353 work->cm_event.private_data =
2354 IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
2355}
2356
2357static void cm_dup_rep_handler(struct cm_work *work)
2358{
2359 struct cm_id_private *cm_id_priv;
2360 struct cm_rep_msg *rep_msg;
2361 struct ib_mad_send_buf *msg = NULL;
2362 int ret;
2363
2364 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2365 cm_id_priv = cm_acquire_id(
2366 cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
2367 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
2368 if (!cm_id_priv)
2369 return;
2370
2371 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2372 counter[CM_REP_COUNTER]);
2373 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2374 if (ret)
2375 goto deref;
2376
2377 spin_lock_irq(&cm_id_priv->lock);
2378 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2379 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2380 cm_id_priv->private_data,
2381 cm_id_priv->private_data_len);
2382 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2383 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2384 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2385 cm_id_priv->private_data,
2386 cm_id_priv->private_data_len);
2387 else
2388 goto unlock;
2389 spin_unlock_irq(&cm_id_priv->lock);
2390
2391 ret = ib_post_send_mad(msg, NULL);
2392 if (ret)
2393 goto free;
2394 goto deref;
2395
2396unlock: spin_unlock_irq(&cm_id_priv->lock);
2397free: cm_free_msg(msg);
2398deref: cm_deref_id(cm_id_priv);
2399}
2400
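 /*
  * Handle a received REP: verify the connection state, record the
  * remote's parameters, screen for duplicate or stale connections, and
  * queue an IB_CM_REP_RECEIVED event.
  */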
2401static int cm_rep_handler(struct cm_work *work)
2402{
2403 struct cm_id_private *cm_id_priv;
2404 struct cm_rep_msg *rep_msg;
2405 int ret;
2406 struct cm_id_private *cur_cm_id_priv;
2407 struct ib_cm_id *cm_id;
2408 struct cm_timewait_info *timewait_info;
2409
2410 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2411 cm_id_priv = cm_acquire_id(
2412 cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
2413 if (!cm_id_priv) {
2414 cm_dup_rep_handler(work);
2415 pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
2416 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2417 return -EINVAL;
2418 }
2419
2420 cm_format_rep_event(work, cm_id_priv->qp_type);
2421
2422 spin_lock_irq(&cm_id_priv->lock);
2423 switch (cm_id_priv->id.state) {
2424 case IB_CM_REQ_SENT:
2425 case IB_CM_MRA_REQ_RCVD:
2426 break;
2427 default:
2428 ret = -EINVAL;
2429 pr_debug(
2430 "%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
2431 __func__, cm_id_priv->id.state,
2432 IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2433 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2434 spin_unlock_irq(&cm_id_priv->lock);
2435 goto error;
2436 }
2437
2438 cm_id_priv->timewait_info->work.remote_id =
2439 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2440 cm_id_priv->timewait_info->remote_ca_guid =
2441 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2442 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2443
2444 spin_lock(&cm.lock);
2445
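 /* Check for a duplicate REP. */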
2446 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2447 spin_unlock(&cm.lock);
2448 spin_unlock_irq(&cm_id_priv->lock);
2449 ret = -EINVAL;
2450 pr_debug("%s: Failed to insert remote id %d\n", __func__,
2451 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2452 goto error;
2453 }
2454
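 /* Check for a stale connection. */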
2455 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2456 if (timewait_info) {
2457 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2458 &cm.remote_id_table);
2459 cm_id_priv->timewait_info->inserted_remote_id = 0;
2460 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2461 timewait_info->work.remote_id);
2462
2463 spin_unlock(&cm.lock);
2464 spin_unlock_irq(&cm_id_priv->lock);
2465 cm_issue_rej(work->port, work->mad_recv_wc,
2466 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2467 NULL, 0);
2468 ret = -EINVAL;
2469 pr_debug(
2470 "%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
2471 __func__, IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2472 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2473
2474 if (cur_cm_id_priv) {
2475 cm_id = &cur_cm_id_priv->id;
2476 ib_send_cm_dreq(cm_id, NULL, 0);
2477 cm_deref_id(cur_cm_id_priv);
2478 }
2479
2480 goto error;
2481 }
2482 spin_unlock(&cm.lock);
2483
2484 cm_id_priv->id.state = IB_CM_REP_RCVD;
2485 cm_id_priv->id.remote_id =
2486 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2487 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2488 cm_id_priv->initiator_depth =
2489 IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2490 cm_id_priv->responder_resources =
2491 IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2492 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2493 cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2494 cm_id_priv->target_ack_delay =
2495 IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2496 cm_id_priv->av.timeout =
2497 cm_ack_timeout(cm_id_priv->target_ack_delay,
2498 cm_id_priv->av.timeout - 1);
2499 cm_id_priv->alt_av.timeout =
2500 cm_ack_timeout(cm_id_priv->target_ack_delay,
2501 cm_id_priv->alt_av.timeout - 1);
2502
2503 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2504 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2505 if (!ret)
2506 list_add_tail(&work->list, &cm_id_priv->work_list);
2507 spin_unlock_irq(&cm_id_priv->lock);
2508
2509 if (ret)
2510 cm_process_work(cm_id_priv, work);
2511 else
2512 cm_deref_id(cm_id_priv);
2513 return 0;
2514
2515error:
2516 cm_deref_id(cm_id_priv);
2517 return ret;
2518}
2519
2520static int cm_establish_handler(struct cm_work *work)
2521{
2522 struct cm_id_private *cm_id_priv;
2523 int ret;
2524
2525
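 /* See comment in cm_establish about lookup. */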
2526 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2527 if (!cm_id_priv)
2528 return -EINVAL;
2529
2530 spin_lock_irq(&cm_id_priv->lock);
2531 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2532 spin_unlock_irq(&cm_id_priv->lock);
2533 goto out;
2534 }
2535
2536 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2537 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2538 if (!ret)
2539 list_add_tail(&work->list, &cm_id_priv->work_list);
2540 spin_unlock_irq(&cm_id_priv->lock);
2541
2542 if (ret)
2543 cm_process_work(cm_id_priv, work);
2544 else
2545 cm_deref_id(cm_id_priv);
2546 return 0;
2547out:
2548 cm_deref_id(cm_id_priv);
2549 return -EINVAL;
2550}
2551
2552static int cm_rtu_handler(struct cm_work *work)
2553{
2554 struct cm_id_private *cm_id_priv;
2555 struct cm_rtu_msg *rtu_msg;
2556 int ret;
2557
2558 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2559 cm_id_priv = cm_acquire_id(
2560 cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
2561 cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
2562 if (!cm_id_priv)
2563 return -EINVAL;
2564
2565 work->cm_event.private_data =
2566 IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
2567
2568 spin_lock_irq(&cm_id_priv->lock);
2569 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2570 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2571 spin_unlock_irq(&cm_id_priv->lock);
2572 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2573 counter[CM_RTU_COUNTER]);
2574 goto out;
2575 }
2576 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2577
2578 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2579 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2580 if (!ret)
2581 list_add_tail(&work->list, &cm_id_priv->work_list);
2582 spin_unlock_irq(&cm_id_priv->lock);
2583
2584 if (ret)
2585 cm_process_work(cm_id_priv, work);
2586 else
2587 cm_deref_id(cm_id_priv);
2588 return 0;
2589out:
2590 cm_deref_id(cm_id_priv);
2591 return -EINVAL;
2592}
2593
2594static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2595 struct cm_id_private *cm_id_priv,
2596 const void *private_data,
2597 u8 private_data_len)
2598{
2599 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2600 cm_form_tid(cm_id_priv));
2601 IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
2602 be32_to_cpu(cm_id_priv->id.local_id));
2603 IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
2604 be32_to_cpu(cm_id_priv->id.remote_id));
2605 IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
2606 be32_to_cpu(cm_id_priv->remote_qpn));
2607
2608 if (private_data && private_data_len)
2609 IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
2610 private_data_len);
2611}
2612
2613static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
2614 const void *private_data, u8 private_data_len)
2615{
2616 struct ib_mad_send_buf *msg;
2617 int ret;
2618
2619 lockdep_assert_held(&cm_id_priv->lock);
2620
2621 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2622 return -EINVAL;
2623
2624 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2625 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2626 be32_to_cpu(cm_id_priv->id.local_id),
2627 cm_id_priv->id.state);
2628 return -EINVAL;
2629 }
2630
2631 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2632 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2633 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2634
2635 ret = cm_alloc_msg(cm_id_priv, &msg);
2636 if (ret) {
2637 cm_enter_timewait(cm_id_priv);
2638 return ret;
2639 }
2640
2641 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2642 private_data, private_data_len);
2643 msg->timeout_ms = cm_id_priv->timeout_ms;
2644 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2645
2646 ret = ib_post_send_mad(msg, NULL);
2647 if (ret) {
2648 cm_enter_timewait(cm_id_priv);
2649 cm_free_msg(msg);
2650 return ret;
2651 }
2652
2653 cm_id_priv->id.state = IB_CM_DREQ_SENT;
2654 cm_id_priv->msg = msg;
2655 return 0;
2656}
2657
2658int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
2659 u8 private_data_len)
2660{
2661 struct cm_id_private *cm_id_priv =
2662 container_of(cm_id, struct cm_id_private, id);
2663 unsigned long flags;
2664 int ret;
2665
2666 spin_lock_irqsave(&cm_id_priv->lock, flags);
2667 ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
2668 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2669 return ret;
2670}
2671EXPORT_SYMBOL(ib_send_cm_dreq);
2672
2673static void cm_format_drep(struct cm_drep_msg *drep_msg,
2674 struct cm_id_private *cm_id_priv,
2675 const void *private_data,
2676 u8 private_data_len)
2677{
2678 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2679 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2680 be32_to_cpu(cm_id_priv->id.local_id));
2681 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2682 be32_to_cpu(cm_id_priv->id.remote_id));
2683
2684 if (private_data && private_data_len)
2685 IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
2686 private_data_len);
2687}
2688
2689static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
2690 void *private_data, u8 private_data_len)
2691{
2692 struct ib_mad_send_buf *msg;
2693 int ret;
2694
2695 lockdep_assert_held(&cm_id_priv->lock);
2696
2697 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2698 return -EINVAL;
2699
2700 if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2701 pr_debug(
2702 "%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
2703 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2704 cm_id_priv->id.state);
2705 kfree(private_data);
2706 return -EINVAL;
2707 }
2708
2709 cm_set_private_data(cm_id_priv, private_data, private_data_len);
2710 cm_enter_timewait(cm_id_priv);
2711
2712 ret = cm_alloc_msg(cm_id_priv, &msg);
2713 if (ret)
2714 return ret;
2715
2716 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2717 private_data, private_data_len);
2718
2719 ret = ib_post_send_mad(msg, NULL);
2720 if (ret) {
2721 cm_free_msg(msg);
2722 return ret;
2723 }
2724 return 0;
2725}
2726
2727int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
2728 u8 private_data_len)
2729{
2730 struct cm_id_private *cm_id_priv =
2731 container_of(cm_id, struct cm_id_private, id);
2732 unsigned long flags;
2733 void *data;
2734 int ret;
2735
2736 data = cm_copy_private_data(private_data, private_data_len);
2737 if (IS_ERR(data))
2738 return PTR_ERR(data);
2739
2740 spin_lock_irqsave(&cm_id_priv->lock, flags);
2741 ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
2742 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2743 return ret;
2744}
2745EXPORT_SYMBOL(ib_send_cm_drep);
2746
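 /*
  * No cm_id matched this DREQ, so answer it directly with a DREP built
  * from the DREQ's comm IDs and transaction ID.
  */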
2747static int cm_issue_drep(struct cm_port *port,
2748 struct ib_mad_recv_wc *mad_recv_wc)
2749{
2750 struct ib_mad_send_buf *msg = NULL;
2751 struct cm_dreq_msg *dreq_msg;
2752 struct cm_drep_msg *drep_msg;
2753 int ret;
2754
2755 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2756 if (ret)
2757 return ret;
2758
2759 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2760 drep_msg = (struct cm_drep_msg *) msg->mad;
2761
2762 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2763 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2764 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
2765 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2766 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2767
2768 ret = ib_post_send_mad(msg, NULL);
2769 if (ret)
2770 cm_free_msg(msg);
2771
2772 return ret;
2773}
2774
2775static int cm_dreq_handler(struct cm_work *work)
2776{
2777 struct cm_id_private *cm_id_priv;
2778 struct cm_dreq_msg *dreq_msg;
2779 struct ib_mad_send_buf *msg = NULL;
2780 int ret;
2781
2782 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2783 cm_id_priv = cm_acquire_id(
2784 cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
2785 cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
2786 if (!cm_id_priv) {
2787 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2788 counter[CM_DREQ_COUNTER]);
2789 cm_issue_drep(work->port, work->mad_recv_wc);
2790 pr_debug(
2791 "%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
2792 __func__, IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2793 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2794 return -EINVAL;
2795 }
2796
2797 work->cm_event.private_data =
2798 IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
2799
2800 spin_lock_irq(&cm_id_priv->lock);
2801 if (cm_id_priv->local_qpn !=
2802 cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
2803 goto unlock;
2804
2805 switch (cm_id_priv->id.state) {
2806 case IB_CM_REP_SENT:
2807 case IB_CM_DREQ_SENT:
2808 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2809 break;
2810 case IB_CM_ESTABLISHED:
2811 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2812 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2813 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2814 break;
2815 case IB_CM_MRA_REP_RCVD:
2816 break;
2817 case IB_CM_TIMEWAIT:
2818 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2819 counter[CM_DREQ_COUNTER]);
2820 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2821 if (IS_ERR(msg))
2822 goto unlock;
2823
2824 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2825 cm_id_priv->private_data,
2826 cm_id_priv->private_data_len);
2827 spin_unlock_irq(&cm_id_priv->lock);
2828
2829 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2830 ib_post_send_mad(msg, NULL))
2831 cm_free_msg(msg);
2832 goto deref;
2833 case IB_CM_DREQ_RCVD:
2834 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2835 counter[CM_DREQ_COUNTER]);
2836 goto unlock;
2837 default:
2838 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2839 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2840 cm_id_priv->id.state);
2841 goto unlock;
2842 }
2843 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2844 cm_id_priv->tid = dreq_msg->hdr.tid;
2845 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2846 if (!ret)
2847 list_add_tail(&work->list, &cm_id_priv->work_list);
2848 spin_unlock_irq(&cm_id_priv->lock);
2849
2850 if (ret)
2851 cm_process_work(cm_id_priv, work);
2852 else
2853 cm_deref_id(cm_id_priv);
2854 return 0;
2855
2856unlock: spin_unlock_irq(&cm_id_priv->lock);
2857deref: cm_deref_id(cm_id_priv);
2858 return -EINVAL;
2859}
2860
2861static int cm_drep_handler(struct cm_work *work)
2862{
2863 struct cm_id_private *cm_id_priv;
2864 struct cm_drep_msg *drep_msg;
2865 int ret;
2866
2867 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2868 cm_id_priv = cm_acquire_id(
2869 cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
2870 cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
2871 if (!cm_id_priv)
2872 return -EINVAL;
2873
2874 work->cm_event.private_data =
2875 IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
2876
2877 spin_lock_irq(&cm_id_priv->lock);
2878 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2879 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2880 spin_unlock_irq(&cm_id_priv->lock);
2881 goto out;
2882 }
2883 cm_enter_timewait(cm_id_priv);
2884
2885 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2886 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2887 if (!ret)
2888 list_add_tail(&work->list, &cm_id_priv->work_list);
2889 spin_unlock_irq(&cm_id_priv->lock);
2890
2891 if (ret)
2892 cm_process_work(cm_id_priv, work);
2893 else
2894 cm_deref_id(cm_id_priv);
2895 return 0;
2896out:
2897 cm_deref_id(cm_id_priv);
2898 return -EINVAL;
2899}
2900
2901static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
2902 enum ib_cm_rej_reason reason, void *ari,
2903 u8 ari_length, const void *private_data,
2904 u8 private_data_len)
2905{
2906 enum ib_cm_state state = cm_id_priv->id.state;
2907 struct ib_mad_send_buf *msg;
2908 int ret;
2909
2910 lockdep_assert_held(&cm_id_priv->lock);
2911
2912 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2913 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2914 return -EINVAL;
2915
2916 switch (state) {
2917 case IB_CM_REQ_SENT:
2918 case IB_CM_MRA_REQ_RCVD:
2919 case IB_CM_REQ_RCVD:
2920 case IB_CM_MRA_REQ_SENT:
2921 case IB_CM_REP_RCVD:
2922 case IB_CM_MRA_REP_SENT:
2923 cm_reset_to_idle(cm_id_priv);
2924 ret = cm_alloc_msg(cm_id_priv, &msg);
2925 if (ret)
2926 return ret;
2927 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2928 ari, ari_length, private_data, private_data_len,
2929 state);
2930 break;
2931 case IB_CM_REP_SENT:
2932 case IB_CM_MRA_REP_RCVD:
2933 cm_enter_timewait(cm_id_priv);
2934 ret = cm_alloc_msg(cm_id_priv, &msg);
2935 if (ret)
2936 return ret;
2937 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2938 ari, ari_length, private_data, private_data_len,
2939 state);
2940 break;
2941 default:
2942 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2943 be32_to_cpu(cm_id_priv->id.local_id),
2944 cm_id_priv->id.state);
2945 return -EINVAL;
2946 }
2947
2948 ret = ib_post_send_mad(msg, NULL);
2949 if (ret) {
2950 cm_free_msg(msg);
2951 return ret;
2952 }
2953
2954 return 0;
2955}
2956
2957int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
2958 void *ari, u8 ari_length, const void *private_data,
2959 u8 private_data_len)
2960{
2961 struct cm_id_private *cm_id_priv =
2962 container_of(cm_id, struct cm_id_private, id);
2963 unsigned long flags;
2964 int ret;
2965
2966 spin_lock_irqsave(&cm_id_priv->lock, flags);
2967 ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
2968 private_data, private_data_len);
2969 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2970 return ret;
2971}
2972EXPORT_SYMBOL(ib_send_cm_rej);
2973
2974static void cm_format_rej_event(struct cm_work *work)
2975{
2976 struct cm_rej_msg *rej_msg;
2977 struct ib_cm_rej_event_param *param;
2978
2979 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2980 param = &work->cm_event.param.rej_rcvd;
2981 param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
2982 param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
2983 param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
2984 work->cm_event.private_data =
2985 IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
2986}
2987
2988static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2989{
2990 struct cm_timewait_info *timewait_info;
2991 struct cm_id_private *cm_id_priv;
2992 __be32 remote_id;
2993
2994 remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
2995
2996 if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
2997 spin_lock_irq(&cm.lock);
2998 timewait_info = cm_find_remote_id(
2999 *((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
3000 remote_id);
3001 if (!timewait_info) {
3002 spin_unlock_irq(&cm.lock);
3003 return NULL;
3004 }
3005 cm_id_priv =
3006 cm_acquire_id(timewait_info->work.local_id, remote_id);
3007 spin_unlock_irq(&cm.lock);
3008 } else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
3009 CM_MSG_RESPONSE_REQ)
3010 cm_id_priv = cm_acquire_id(
3011 cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3012 0);
3013 else
3014 cm_id_priv = cm_acquire_id(
3015 cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3016 remote_id);
3017
3018 return cm_id_priv;
3019}
3020
3021static int cm_rej_handler(struct cm_work *work)
3022{
3023 struct cm_id_private *cm_id_priv;
3024 struct cm_rej_msg *rej_msg;
3025 int ret;
3026
3027 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
3028 cm_id_priv = cm_acquire_rejected_id(rej_msg);
3029 if (!cm_id_priv)
3030 return -EINVAL;
3031
3032 cm_format_rej_event(work);
3033
3034 spin_lock_irq(&cm_id_priv->lock);
3035 switch (cm_id_priv->id.state) {
3036 case IB_CM_REQ_SENT:
3037 case IB_CM_MRA_REQ_RCVD:
3038 case IB_CM_REP_SENT:
3039 case IB_CM_MRA_REP_RCVD:
3040 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3041 /* fall through */
3042 case IB_CM_REQ_RCVD:
3043 case IB_CM_MRA_REQ_SENT:
3044 if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
3045 cm_enter_timewait(cm_id_priv);
3046 else
3047 cm_reset_to_idle(cm_id_priv);
3048 break;
3049 case IB_CM_DREQ_SENT:
3050 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3051 /* fall through */
3052 case IB_CM_REP_RCVD:
3053 case IB_CM_MRA_REP_SENT:
3054 cm_enter_timewait(cm_id_priv);
3055 break;
3056 case IB_CM_ESTABLISHED:
3057 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
3058 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
3059 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
3060 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
3061 cm_id_priv->msg);
3062 cm_enter_timewait(cm_id_priv);
3063 break;
3064 }
3065 /* fall through */
3066 default:
3067 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
3068 __func__, be32_to_cpu(cm_id_priv->id.local_id),
3069 cm_id_priv->id.state);
3070 spin_unlock_irq(&cm_id_priv->lock);
3071 ret = -EINVAL;
3072 goto out;
3073 }
3074
3075 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3076 if (!ret)
3077 list_add_tail(&work->list, &cm_id_priv->work_list);
3078 spin_unlock_irq(&cm_id_priv->lock);
3079
3080 if (ret)
3081 cm_process_work(cm_id_priv, work);
3082 else
3083 cm_deref_id(cm_id_priv);
3084 return 0;
3085out:
3086 cm_deref_id(cm_id_priv);
3087 return -EINVAL;
3088}
3089
3090int ib_send_cm_mra(struct ib_cm_id *cm_id,
3091 u8 service_timeout,
3092 const void *private_data,
3093 u8 private_data_len)
3094{
3095 struct cm_id_private *cm_id_priv;
3096 struct ib_mad_send_buf *msg;
3097 enum ib_cm_state cm_state;
3098 enum ib_cm_lap_state lap_state;
3099 enum cm_msg_response msg_response;
3100 void *data;
3101 unsigned long flags;
3102 int ret;
3103
3104 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
3105 return -EINVAL;
3106
3107 data = cm_copy_private_data(private_data, private_data_len);
3108 if (IS_ERR(data))
3109 return PTR_ERR(data);
3110
3111 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3112
3113 spin_lock_irqsave(&cm_id_priv->lock, flags);
3114 switch (cm_id_priv->id.state) {
3115 case IB_CM_REQ_RCVD:
3116 cm_state = IB_CM_MRA_REQ_SENT;
3117 lap_state = cm_id->lap_state;
3118 msg_response = CM_MSG_RESPONSE_REQ;
3119 break;
3120 case IB_CM_REP_RCVD:
3121 cm_state = IB_CM_MRA_REP_SENT;
3122 lap_state = cm_id->lap_state;
3123 msg_response = CM_MSG_RESPONSE_REP;
3124 break;
3125 case IB_CM_ESTABLISHED:
3126 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
3127 cm_state = cm_id->state;
3128 lap_state = IB_CM_MRA_LAP_SENT;
3129 msg_response = CM_MSG_RESPONSE_OTHER;
3130 break;
3131 }
3132 /* fall through */
3133 default:
3134 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
3135 __func__, be32_to_cpu(cm_id_priv->id.local_id),
3136 cm_id_priv->id.state);
3137 ret = -EINVAL;
3138 goto error1;
3139 }
3140
3141 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
3142 ret = cm_alloc_msg(cm_id_priv, &msg);
3143 if (ret)
3144 goto error1;
3145
3146 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3147 msg_response, service_timeout,
3148 private_data, private_data_len);
3149 ret = ib_post_send_mad(msg, NULL);
3150 if (ret)
3151 goto error2;
3152 }
3153
3154 cm_id->state = cm_state;
3155 cm_id->lap_state = lap_state;
3156 cm_id_priv->service_timeout = service_timeout;
3157 cm_set_private_data(cm_id_priv, data, private_data_len);
3158 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3159 return 0;
3160
3161error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3162 kfree(data);
3163 return ret;
3164
3165error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3166 kfree(data);
3167 cm_free_msg(msg);
3168 return ret;
3169}
3170EXPORT_SYMBOL(ib_send_cm_mra);
3171
3172static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
3173{
3174 switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
3175 case CM_MSG_RESPONSE_REQ:
3176 return cm_acquire_id(
3177 cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3178 0);
3179 case CM_MSG_RESPONSE_REP:
3180 case CM_MSG_RESPONSE_OTHER:
3181 return cm_acquire_id(
3182 cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3183 cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
3184 default:
3185 return NULL;
3186 }
3187}
3188
3189static int cm_mra_handler(struct cm_work *work)
3190{
3191 struct cm_id_private *cm_id_priv;
3192 struct cm_mra_msg *mra_msg;
3193 int timeout, ret;
3194
3195 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3196 cm_id_priv = cm_acquire_mraed_id(mra_msg);
3197 if (!cm_id_priv)
3198 return -EINVAL;
3199
3200 work->cm_event.private_data =
3201 IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
3202 work->cm_event.param.mra_rcvd.service_timeout =
3203 IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
3204 timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
3205 cm_convert_to_ms(cm_id_priv->av.timeout);
3206
3207 spin_lock_irq(&cm_id_priv->lock);
3208 switch (cm_id_priv->id.state) {
3209 case IB_CM_REQ_SENT:
3210 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3211 CM_MSG_RESPONSE_REQ ||
3212 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3213 cm_id_priv->msg, timeout))
3214 goto out;
3215 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3216 break;
3217 case IB_CM_REP_SENT:
3218 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3219 CM_MSG_RESPONSE_REP ||
3220 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3221 cm_id_priv->msg, timeout))
3222 goto out;
3223 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3224 break;
3225 case IB_CM_ESTABLISHED:
3226 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3227 CM_MSG_RESPONSE_OTHER ||
3228 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3229 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3230 cm_id_priv->msg, timeout)) {
3231 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3232 atomic_long_inc(&work->port->
3233 counter_group[CM_RECV_DUPLICATES].
3234 counter[CM_MRA_COUNTER]);
3235 goto out;
3236 }
3237 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3238 break;
3239 case IB_CM_MRA_REQ_RCVD:
3240 case IB_CM_MRA_REP_RCVD:
3241 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3242 counter[CM_MRA_COUNTER]);
3243 /* fall through */
3244 default:
3245 pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
3246 __func__, be32_to_cpu(cm_id_priv->id.local_id),
3247 cm_id_priv->id.state);
3248 goto out;
3249 }
3250
3251 cm_id_priv->msg->context[1] = (void *) (unsigned long)
3252 cm_id_priv->id.state;
3253 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3254 if (!ret)
3255 list_add_tail(&work->list, &cm_id_priv->work_list);
3256 spin_unlock_irq(&cm_id_priv->lock);
3257
3258 if (ret)
3259 cm_process_work(cm_id_priv, work);
3260 else
3261 cm_deref_id(cm_id_priv);
3262 return 0;
3263out:
3264 spin_unlock_irq(&cm_id_priv->lock);
3265 cm_deref_id(cm_id_priv);
3266 return -EINVAL;
3267}
3268
3269static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3270 struct sa_path_rec *path)
3271{
3272 u32 lid;
3273
3274 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3275 sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
3276 lap_msg));
3277 sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
3278 lap_msg));
3279 } else {
3280 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3281 CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
3282 sa_path_set_dlid(path, lid);
3283
3284 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3285 CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
3286 sa_path_set_slid(path, lid);
3287 }
3288}
3289
3290static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3291 struct sa_path_rec *path,
3292 struct cm_lap_msg *lap_msg)
3293{
3294 path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
3295 path->sgid =
3296 *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
3297 path->flow_label =
3298 cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
3299 path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
3300 path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
3301 path->reversible = 1;
3302 path->pkey = cm_id_priv->pkey;
3303 path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
3304 path->mtu_selector = IB_SA_EQ;
3305 path->mtu = cm_id_priv->path_mtu;
3306 path->rate_selector = IB_SA_EQ;
3307 path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
3308 path->packet_life_time_selector = IB_SA_EQ;
3309 path->packet_life_time =
3310 IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
3311 path->packet_life_time -= (path->packet_life_time > 0);
3312 cm_format_path_lid_from_lap(lap_msg, path);
3313}
3314
3315static int cm_lap_handler(struct cm_work *work)
3316{
3317 struct cm_id_private *cm_id_priv;
3318 struct cm_lap_msg *lap_msg;
3319 struct ib_cm_lap_event_param *param;
3320 struct ib_mad_send_buf *msg = NULL;
3321 int ret;
3322
3323
3324
3325
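 /* Alternate path messages are not supported for the RoCE link layer. */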
3326 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3327 work->port->port_num))
3328 return -EINVAL;
3329
3330
3331 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3332 cm_id_priv = cm_acquire_id(
3333 cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
3334 cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
3335 if (!cm_id_priv)
3336 return -EINVAL;
3337
3338 param = &work->cm_event.param.lap_rcvd;
3339 memset(&work->path[0], 0, sizeof(work->path[0]));
3340 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3341 work->port->port_num, &work->path[0],
3342 IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
3343 lap_msg));
3344 param->alternate_path = &work->path[0];
3345 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3346 work->cm_event.private_data =
3347 IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
3348
3349 spin_lock_irq(&cm_id_priv->lock);
3350 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3351 goto unlock;
3352
3353 switch (cm_id_priv->id.lap_state) {
3354 case IB_CM_LAP_UNINIT:
3355 case IB_CM_LAP_IDLE:
3356 break;
3357 case IB_CM_MRA_LAP_SENT:
3358 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3359 counter[CM_LAP_COUNTER]);
3360 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3361 if (IS_ERR(msg))
3362 goto unlock;
3363
3364 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3365 CM_MSG_RESPONSE_OTHER,
3366 cm_id_priv->service_timeout,
3367 cm_id_priv->private_data,
3368 cm_id_priv->private_data_len);
3369 spin_unlock_irq(&cm_id_priv->lock);
3370
3371 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3372 ib_post_send_mad(msg, NULL))
3373 cm_free_msg(msg);
3374 goto deref;
3375 case IB_CM_LAP_RCVD:
3376 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3377 counter[CM_LAP_COUNTER]);
3378 goto unlock;
3379 default:
3380 goto unlock;
3381 }
3382
3383 ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3384 work->mad_recv_wc->recv_buf.grh,
3385 &cm_id_priv->av);
3386 if (ret)
3387 goto unlock;
3388
3389 ret = cm_init_av_by_path(param->alternate_path, NULL,
3390 &cm_id_priv->alt_av, cm_id_priv);
3391 if (ret)
3392 goto unlock;
3393
3394 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3395 cm_id_priv->tid = lap_msg->hdr.tid;
3396 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3397 if (!ret)
3398 list_add_tail(&work->list, &cm_id_priv->work_list);
3399 spin_unlock_irq(&cm_id_priv->lock);
3400
3401 if (ret)
3402 cm_process_work(cm_id_priv, work);
3403 else
3404 cm_deref_id(cm_id_priv);
3405 return 0;
3406
3407unlock: spin_unlock_irq(&cm_id_priv->lock);
3408deref: cm_deref_id(cm_id_priv);
3409 return -EINVAL;
3410}
3411
3412static int cm_apr_handler(struct cm_work *work)
3413{
3414 struct cm_id_private *cm_id_priv;
3415 struct cm_apr_msg *apr_msg;
3416 int ret;
3417
3418
3419
3420
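 /* Alternate path messages are not supported for the RoCE link layer. */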
3421 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3422 work->port->port_num))
3423 return -EINVAL;
3424
3425 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3426 cm_id_priv = cm_acquire_id(
3427 cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
3428 cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
3429 if (!cm_id_priv)
3430 return -EINVAL;
3431
3432 work->cm_event.param.apr_rcvd.ap_status =
3433 IBA_GET(CM_APR_AR_STATUS, apr_msg);
3434 work->cm_event.param.apr_rcvd.apr_info =
3435 IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
3436 work->cm_event.param.apr_rcvd.info_len =
3437 IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
3438 work->cm_event.private_data =
3439 IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
3440
3441 spin_lock_irq(&cm_id_priv->lock);
3442 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3443 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3444 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3445 spin_unlock_irq(&cm_id_priv->lock);
3446 goto out;
3447 }
3448 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3449 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3450 cm_id_priv->msg = NULL;
3451
3452 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3453 if (!ret)
3454 list_add_tail(&work->list, &cm_id_priv->work_list);
3455 spin_unlock_irq(&cm_id_priv->lock);
3456
3457 if (ret)
3458 cm_process_work(cm_id_priv, work);
3459 else
3460 cm_deref_id(cm_id_priv);
3461 return 0;
3462out:
3463 cm_deref_id(cm_id_priv);
3464 return -EINVAL;
3465}
3466
3467static int cm_timewait_handler(struct cm_work *work)
3468{
3469 struct cm_timewait_info *timewait_info;
3470 struct cm_id_private *cm_id_priv;
3471 int ret;
3472
3473 timewait_info = container_of(work, struct cm_timewait_info, work);
3474 spin_lock_irq(&cm.lock);
3475 list_del(&timewait_info->list);
3476 spin_unlock_irq(&cm.lock);
3477
3478 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3479 timewait_info->work.remote_id);
3480 if (!cm_id_priv)
3481 return -EINVAL;
3482
3483 spin_lock_irq(&cm_id_priv->lock);
3484 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3485 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3486 spin_unlock_irq(&cm_id_priv->lock);
3487 goto out;
3488 }
3489 cm_id_priv->id.state = IB_CM_IDLE;
3490 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3491 if (!ret)
3492 list_add_tail(&work->list, &cm_id_priv->work_list);
3493 spin_unlock_irq(&cm_id_priv->lock);
3494
3495 if (ret)
3496 cm_process_work(cm_id_priv, work);
3497 else
3498 cm_deref_id(cm_id_priv);
3499 return 0;
3500out:
3501 cm_deref_id(cm_id_priv);
3502 return -EINVAL;
3503}
3504
3505static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3506 struct cm_id_private *cm_id_priv,
3507 struct ib_cm_sidr_req_param *param)
3508{
3509 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3510 cm_form_tid(cm_id_priv));
3511 IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3512 be32_to_cpu(cm_id_priv->id.local_id));
3513 IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3514 be16_to_cpu(param->path->pkey));
3515 IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3516 be64_to_cpu(param->service_id));
3517
3518 if (param->private_data && param->private_data_len)
3519 IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
3520 param->private_data, param->private_data_len);
3521}
3522
3523int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3524 struct ib_cm_sidr_req_param *param)
3525{
3526 struct cm_id_private *cm_id_priv;
3527 struct ib_mad_send_buf *msg;
3528 unsigned long flags;
3529 int ret;
3530
3531 if (!param->path || (param->private_data &&
3532 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3533 return -EINVAL;
3534
3535 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3536 ret = cm_init_av_by_path(param->path, param->sgid_attr,
3537 &cm_id_priv->av,
3538 cm_id_priv);
3539 if (ret)
3540 goto out;
3541
3542 cm_id->service_id = param->service_id;
3543 cm_id->service_mask = ~cpu_to_be64(0);
3544 cm_id_priv->timeout_ms = param->timeout_ms;
3545 cm_id_priv->max_cm_retries = param->max_cm_retries;
3546 ret = cm_alloc_msg(cm_id_priv, &msg);
3547 if (ret)
3548 goto out;
3549
3550 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3551 param);
3552 msg->timeout_ms = cm_id_priv->timeout_ms;
3553 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3554
3555 spin_lock_irqsave(&cm_id_priv->lock, flags);
3556 if (cm_id->state == IB_CM_IDLE)
3557 ret = ib_post_send_mad(msg, NULL);
3558 else
3559 ret = -EINVAL;
3560
3561 if (ret) {
3562 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3563 cm_free_msg(msg);
3564 goto out;
3565 }
3566 cm_id->state = IB_CM_SIDR_REQ_SENT;
3567 cm_id_priv->msg = msg;
3568 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3569out:
3570 return ret;
3571}
3572EXPORT_SYMBOL(ib_send_cm_sidr_req);
3573
3574static void cm_format_sidr_req_event(struct cm_work *work,
3575 const struct cm_id_private *rx_cm_id,
3576 struct ib_cm_id *listen_id)
3577{
3578 struct cm_sidr_req_msg *sidr_req_msg;
3579 struct ib_cm_sidr_req_event_param *param;
3580
3581 sidr_req_msg = (struct cm_sidr_req_msg *)
3582 work->mad_recv_wc->recv_buf.mad;
3583 param = &work->cm_event.param.sidr_req_rcvd;
3584 param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
3585 param->listen_id = listen_id;
3586 param->service_id =
3587 cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3588 param->bth_pkey = cm_get_bth_pkey(work);
3589 param->port = work->port->port_num;
3590 param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3591 work->cm_event.private_data =
3592 IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
3593}
3594
3595static int cm_sidr_req_handler(struct cm_work *work)
3596{
3597 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
3598 struct cm_sidr_req_msg *sidr_req_msg;
3599 struct ib_wc *wc;
3600 int ret;
3601
3602 cm_id_priv =
3603 cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
3604 if (IS_ERR(cm_id_priv))
3605 return PTR_ERR(cm_id_priv);
3606
3607
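 /* Record identifiers from the SIDR REQ; they key the duplicate check below. */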
3608 sidr_req_msg = (struct cm_sidr_req_msg *)
3609 work->mad_recv_wc->recv_buf.mad;
3610
3611 cm_id_priv->id.remote_id =
3612 cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
3613 cm_id_priv->id.service_id =
3614 cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3615 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3616 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3617
3618 wc = work->mad_recv_wc->wc;
3619 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3620 cm_id_priv->av.dgid.global.interface_id = 0;
3621 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3622 work->mad_recv_wc->recv_buf.grh,
3623 &cm_id_priv->av);
3624 if (ret)
3625 goto out;
3626
3627 spin_lock_irq(&cm.lock);
3628 listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3629 if (listen_cm_id_priv) {
3630 spin_unlock_irq(&cm.lock);
3631 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3632 counter[CM_SIDR_REQ_COUNTER]);
3633 goto out;
3634 }
3635 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3636 listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
3637 cm_id_priv->id.service_id);
3638 if (!listen_cm_id_priv) {
3639 spin_unlock_irq(&cm.lock);
3640 ib_send_cm_sidr_rep(&cm_id_priv->id,
3641 &(struct ib_cm_sidr_rep_param){
3642 .status = IB_SIDR_UNSUPPORTED });
3643 goto out;
3644 }
3645 refcount_inc(&listen_cm_id_priv->refcount);
3646 spin_unlock_irq(&cm.lock);
3647
3648 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
3649 cm_id_priv->id.context = listen_cm_id_priv->id.context;
3650
3651
3652
3653
3654
3655
3656
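 /*
  * A SIDR ID does not need to be in the xarray: it never receives MADs,
  * is not placed in the remote_id or remote_qpn rbtrees, and does not
  * enter timewait.
  */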
3657 cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
3658 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
3659 cm_free_work(work);
3660
3661
3662
3663
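 /*
  * The event carries a pointer to the listen cm_id, so this deref must
  * come only after the handler above has consumed the event.
  */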
	cm_deref_id(listen_cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			  cm_id_priv->tid);
	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
		be64_to_cpu(cm_id_priv->id.service_id));
	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);

	if (param->info && param->info_length)
		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
			    param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
			    param->private_data, param->private_data_len);
}

static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param)
{
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
		return -EINVAL;

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		return ret;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	return 0;
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work,
				     const struct cm_id_private *cm_id_priv)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
		work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
	param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
	param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
				      sidr_rep_msg);
	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
				  sidr_rep_msg);
	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
		work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work, cm_id_priv);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

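	/* Discard old sends or ones without a response. */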
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
			     state, ib_wc_status_msg(wc_status));
	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

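	/* No other events can occur on the cm_id until the send completes. */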
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

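	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */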
	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries,
			&port->counter_group[CM_XMIT].counter[attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counter_group[CM_XMIT_RETRIES].
				counter[attr_index]);

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id->local_id), cm_id->state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

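	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we find the cm_id once we're in the context of the worker
	 * thread, rather than holding a reference on it.
	 */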
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

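	/* Check if the device started its remove_one. */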
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

out:
	return ret;
}

static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_av tmp_av;
	unsigned long flags;
	int tmp_send_port_not_ready;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
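		/* Swap the primary and alternate address vectors. */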
		tmp_av = cm_id_priv->av;
		cm_id_priv->av = cm_id_priv->alt_av;
		cm_id_priv->alt_av = tmp_av;
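		/* Swap the port send-ready state as well. */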
		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
		cm_id_priv->prim_send_port_not_ready =
			cm_id_priv->altr_send_port_not_ready;
		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);

static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	bool alt_path = false;
	u16 attr_id;
	int paths = 0;
	int going_down = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
					       mad_recv_wc->recv_buf.mad);
		paths = 1 + (alt_path != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counter_group[CM_RECV].
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

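	/* Check if the device started its remove_one. */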
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}

static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
				cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
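	/* Allow transition to RTS before sending REP. */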
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
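				/* fall through */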
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

static char *cm_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
	.owner = THIS_MODULE,
	.name = "infiniband_cm",
	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);

static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = ib_port_register_module_stat(port->cm_dev->ib_device,
						   port->port_num,
						   &port->counter_group[i].obj,
						   &cm_counter_obj_type,
						   counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
}

static void cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u8 i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port)
			goto error1;

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		INIT_LIST_HEAD(&port->cm_priv_prim_list);
		INIT_LIST_HEAD(&port->cm_priv_altr_list);

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	kfree(port);
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
		kfree(port);
	}
free:
	kfree(cm_dev);
}

static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);

		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);

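		/*
		 * Flush the work queue only after going_down is set; this
		 * ensures no new work is queued by the receive handler,
		 * after which it is safe to unregister the MAD agent.
		 */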
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
		kfree(port);
	}

	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret) {
		ret = -ENOMEM;
		goto error1;
	}

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);