#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP] = "no QP",
	[IB_CM_REJ_NO_EEC] = "no EEC",
	[IB_CM_REJ_NO_RESOURCES] = "no resources",
	[IB_CM_REJ_TIMEOUT] = "timeout",
	[IB_CM_REJ_UNSUPPORTED] = "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type",
	[IB_CM_REJ_STALE_CONN] = "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist",
	[IB_CM_REJ_INVALID_GID] = "invalid GID",
	[IB_CM_REJ_INVALID_LID] = "invalid LID",
	[IB_CM_REJ_INVALID_SL] = "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT] = "port redirect",
	[IB_CM_REJ_INVALID_MTU] = "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
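	/* struct rb_root peer_service_table; todo: fix peer to peer */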
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct xarray local_id_table;
	u32 local_id_next;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
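	/* Sync on cm change port state */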
	spinlock_t state_lock;
} cm;
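
/* Counter indexes ordered by attribute ID */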
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct rdma_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[0];
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;
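	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */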
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list; /* used to send to primary path */
	struct list_head altr_list; /* used to send to alternate path */
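	/* Indicates that the send port mad is registered and av is set */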
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;
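
	/* don't let the port be released till the agent is down */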
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
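	/* Make sure the port hasn't released the mad yet */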
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       av->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah, 0);
		ret = PTR_ERR(m);
		goto out;
	}
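
	/* Timeout set by caller if response is expected. */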
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}

static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
							   struct ib_mad_recv_wc *mad_recv_wc)
{
	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				  GFP_ATOMIC,
				  IB_MGMT_BASE_VERSION);
}

static int cm_create_response_msg_ah(struct cm_port *port,
				     struct ib_mad_recv_wc *mad_recv_wc,
				     struct ib_mad_send_buf *msg)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	msg->ah = ah;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	int ret;

	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
	if (IS_ERR(m))
		return PTR_ERR(m);

	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
	if (ret) {
		cm_free_msg(m);
		return ret;
	}

	*msg = m;
	return 0;
}

static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
			      struct ib_grh *grh, struct cm_av *av)
{
	struct rdma_ah_attr new_ah_attr;
	int ret;

	av->port = port;
	av->pkey_index = wc->pkey_index;
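
	/*
	 * av->ah_attr might be initialized based on past wc during incoming
	 * connect request or while sending out connect request. So initialize
	 * a new ah_attr on stack. If initialization fails, old ah_attr is
	 * used for sending any responses. If initialization is successful,
	 * then new ah_attr is used by overwriting the old one.
	 */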
	ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				      port->port_num, wc,
				      grh, &new_ah_attr);
	if (ret)
		return ret;

	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}

static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
				  struct cm_av *av,
				  struct cm_port *port)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm.lock, flags);

	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		ret = -EINVAL;

	spin_unlock_irqrestore(&cm.lock, flags);
	return ret;
}

static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;

	if (attr) {
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			if (cm_dev->ib_device == attr->device) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
	} else {
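		/* SGID attribute can be NULL in following
		 * conditions.
		 * (a) Alternative path
		 * (b) IB link layer without GRH
		 * (c) LAP send messages
		 */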
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			attr = rdma_find_gid(cm_dev->ib_device,
					     &path->sgid,
					     sa_conv_pathrec_to_gid_type(path),
					     NULL);
			if (!IS_ERR(attr)) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
		if (port)
			rdma_put_gid_attr(attr);
	}
	return port;
}

static int cm_init_av_by_path(struct sa_path_rec *path,
			      const struct ib_gid_attr *sgid_attr,
			      struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct rdma_ah_attr new_ah_attr;
	struct cm_device *cm_dev;
	struct cm_port *port;
	int ret;

	port = get_cm_port_from_path(path, sgid_attr);
	if (!port)
		return -EINVAL;
	cm_dev = port->cm_dev;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
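
	/*
	 * av->ah_attr might be initialized based on wc or during
	 * request processing time which might have reference to sgid_attr.
	 * So initialize a new ah_attr on stack.
	 * If initialization fails, old ah_attr is used for sending any
	 * responses. If initialization is successful, then new ah_attr
	 * is used by overwriting the old one. So that the right ah_attr
	 * can be used to return an error response.
	 */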
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&new_ah_attr, sgid_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;

	ret = add_cm_id_to_port_list(cm_id_priv, av, port);
	if (ret) {
		rdma_destroy_ah_attr(&new_ah_attr);
		return ret;
	}
	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	int err;
	u32 id;

	err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv,
				  xa_limit_32b, &cm.local_id_next, GFP_KERNEL);

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return err;
}

static u32 cm_local_id(__be32 local_id)
{
	return (__force u32) (local_id ^ cm.random_id_operand);
}

static void cm_free_id(__be32 local_id)
{
	xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}
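
/*
 * Compare big-endian keys without byte swapping.  The resulting order is
 * not numeric host order, but it is a consistent total order, which is
 * all the red-black trees below require.
 */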
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
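	/* approximate conversion to ms from 4.096us x 2^iba_time */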
	return 1 << max(iba_time - 8, 0);
}
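
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */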
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);
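
	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */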
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
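
	/* Check if the device started its remove_one */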
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_unlock_irq(&cm_id_priv->lock);

		spin_lock_irq(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
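			/* The id is still shared. */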
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
			return;
		}
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
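			/* Do not reject to allow future retries. */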
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
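		/* Fall through */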
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
			break;
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	spin_lock_irq(&cm.lock);
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	spin_unlock_irq(&cm.lock);

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);

	rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
	rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
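
/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter allows the caller to match all service IDs.
 */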
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
			  __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;
		ret = -EBUSY;
	}
	return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
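
/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *			 the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 * be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */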
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_id *cm_id;
	unsigned long flags;
	int err = 0;
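
	/* Create an ID in advance, since the creation may sleep */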
	cm_id = ib_create_cm_id(device, cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	spin_lock_irqsave(&cm.lock, flags);

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		goto new_id;
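
	/* Find an existing ID */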
	cm_id_priv = cm_find_listen(device, service_id);
	if (cm_id_priv) {
		if (cm_id->cm_handler != cm_handler || cm_id->context) {
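			/* Sharing an ib_cm_id with different handlers is not
			 * supported */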
			spin_unlock_irqrestore(&cm.lock, flags);
			return ERR_PTR(-EINVAL);
		}
		atomic_inc(&cm_id_priv->refcount);
		++cm_id_priv->listen_sharecount;
		spin_unlock_irqrestore(&cm.lock, flags);

		ib_destroy_cm_id(cm_id);
		cm_id = &cm_id_priv->id;
		return cm_id;
	}

new_id:
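	/* Use newly created ID */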
	err = __ib_cm_listen(cm_id, service_id, 0);

	spin_unlock_irqrestore(&cm.lock, flags);

	if (err) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(err);
	}
	return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64)cm_id_priv->id.local_id;
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct sa_path_rec *pri_path = param->primary_path;
	struct sa_path_rec *alt_path = param->alternate_path;
	bool pri_ext = false;

	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
					      pri_path->opa.slid);

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	if (pri_ext) {
		req_msg->primary_local_gid.global.interface_id
			= OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
		req_msg->primary_remote_gid.global.interface_id
			= OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
	}
	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_ext ? 0 :
			htons(ntohl(sa_path_get_slid(pri_path)));
		req_msg->primary_remote_lid = pri_ext ? 0 :
			htons(ntohl(sa_path_get_dlid(pri_path)));
	} else {
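		/* Work-around until there's a way to obtain remote LID info */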
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		bool alt_ext = false;

		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
						      alt_path->opa.slid);

		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		if (alt_ext) {
			req_msg->alt_local_gid.global.interface_id
				= OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
			req_msg->alt_remote_gid.global.interface_id
				= OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
		}
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_ext ? 0 :
				htons(ntohl(sa_path_get_slid(alt_path)));
			req_msg->alt_remote_lid = alt_ext ? 0 :
				htons(ntohl(sa_path_get_dlid(alt_path)));
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
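	/* peer-to-peer not supported */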
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;
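
	/* Verify that we're not in timewait. */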
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path,
				 param->ppath_sgid_attr, &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path, NULL,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;
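
	/* We just need common CM header information */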
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
	return ((req_msg->alt_local_lid) ||
		(ib_is_opa_gid(&req_msg->alt_local_gid)));
}

static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
				 struct sa_path_rec *path, union ib_gid *gid)
{
	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
		path->rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->rec_type = SA_PATH_REC_TYPE_IB;
}

static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
					struct sa_path_rec *primary_path,
					struct sa_path_rec *alt_path)
{
	u32 lid;

	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(primary_path,
				 ntohs(req_msg->primary_local_lid));
		sa_path_set_slid(primary_path,
				 ntohs(req_msg->primary_remote_lid));
	} else {
		lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
		sa_path_set_dlid(primary_path, lid);

		lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
		sa_path_set_slid(primary_path, lid);
	}

	if (!cm_req_has_alt_path(req_msg))
		return;

	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
		sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
	} else {
		lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
		sa_path_set_dlid(alt_path, lid);

		lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
		sa_path_set_slid(alt_path, lid);
	}
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct sa_path_rec *primary_path,
				     struct sa_path_rec *alt_path)
{
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id = req_msg->service_id;
	if (sa_path_is_roce(primary_path))
		primary_path->roce.route_resolved = false;

	if (cm_req_has_alt_path(req_msg)) {
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id = req_msg->service_id;

		if (sa_path_is_roce(alt_path))
			alt_path->roce.route_resolved = false;
	}
	cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
}

static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}
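
/**
 * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
 * ULPs (such as IPoIB) do not understand OPA GIDs and will
 * reject them as the local_gid will not match the sgid. Therefore,
 * change the pathrec's SGID to an IB SGID.
 *
 * @work: Work completion
 * @path: Path record
 */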
static void cm_opa_to_ib_sgid(struct cm_work *work,
			      struct sa_path_rec *path)
{
	struct ib_device *dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;

	if (rdma_cap_opa_ah(dev, port_num) &&
	    (ib_is_opa_gid(&path->sgid))) {
		union ib_gid sgid;

		if (rdma_query_gid(dev, port_num, 0, &sgid)) {
			dev_warn(&dev->dev,
				 "Error updating sgid in CM request\n");
			return;
		}

		path->sgid = sgid;
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	cm_opa_to_ib_sgid(work, param->primary_path);
	if (cm_req_has_alt_path(req_msg)) {
		param->alternate_path = &work->path[1];
		cm_opa_to_ib_sgid(work, param->alternate_path);
	} else {
		param->alternate_path = NULL;
	}
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
		cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
		cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;
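
	/* We will typically only have the current event to report. */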
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		if (!work)
			return;

		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);
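
	/* Quick state check to discard duplicate REQs. */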
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	struct ib_cm_id *cm_id;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
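
	/* Check for possible duplicate REQ. */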
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}
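
	/* Check for stale connections. */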
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);

		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		if (cur_cm_id_priv) {
			cm_id = &cur_cm_id_priv->id;
			ib_send_cm_dreq(cm_id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}
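
	/* Find matching listen request. */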
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);
out:
	return listen_cm_id_priv;
}
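
/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */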
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = ib_lid_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = ib_lid_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}

static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	const struct ib_global_route *grh;
	const struct ib_gid_attr *gid_attr;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto destroy;
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
			 be32_to_cpu(cm_id->local_id));
		ret = -EINVAL;
		goto free_timeinfo;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);

	memset(&work->path[0], 0, sizeof(work->path[0]));
	if (cm_req_has_alt_path(req_msg))
		memset(&work->path[1], 0, sizeof(work->path[1]));
	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
	gid_attr = grh->sgid_attr;

	if (gid_attr &&
	    rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num)) {
		work->path[0].rec_type =
			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
	} else {
		cm_path_set_rec_type(work->port->cm_dev->ib_device,
				     work->port->port_num,
				     &work->path[0],
				     &req_msg->primary_local_gid);
	}
	if (cm_req_has_alt_path(req_msg))
		work->path[1].rec_type = work->path[0].rec_type;
	cm_format_paths_from_req(req_msg, &work->path[0],
				 &work->path[1]);
	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
		sa_path_set_dmac(&work->path[0],
				 cm_id_priv->av.ah_attr.roce.dmac);
	work->path[0].hop_limit = grh->hop_limit;
	ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
				 cm_id_priv);
	if (ret) {
		int err;

		err = rdma_query_gid(work->port->cm_dev->ib_device,
				     work->port->port_num, 0,
				     &work->path[0].sgid);
		if (err)
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
				       NULL, 0, NULL, 0);
		else
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid),
				       NULL, 0);
		goto rejected;
	}
	if (cm_req_has_alt_path(req_msg)) {
		ret = cm_init_av_by_path(&work->path[1], NULL,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid), NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;
2053
2054rejected:
2055 atomic_dec(&cm_id_priv->refcount);
2056 cm_deref_id(listen_cm_id_priv);
2057free_timeinfo:
2058 kfree(cm_id_priv->timewait_info);
2059destroy:
2060 ib_destroy_cm_id(cm_id);
2061 return ret;
2062}
2063
2064static void cm_format_rep(struct cm_rep_msg *rep_msg,
2065 struct cm_id_private *cm_id_priv,
2066 struct ib_cm_rep_param *param)
2067{
2068 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
2069 rep_msg->local_comm_id = cm_id_priv->id.local_id;
2070 rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2071 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
2072 rep_msg->resp_resources = param->responder_resources;
2073 cm_rep_set_target_ack_delay(rep_msg,
2074 cm_id_priv->av.port->cm_dev->ack_delay);
2075 cm_rep_set_failover(rep_msg, param->failover_accepted);
2076 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
2077 rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
2078
2079 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2080 rep_msg->initiator_depth = param->initiator_depth;
2081 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
2082 cm_rep_set_srq(rep_msg, param->srq);
2083 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
2084 } else {
2085 cm_rep_set_srq(rep_msg, 1);
2086 cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
2087 }
2088
2089 if (param->private_data && param->private_data_len)
2090 memcpy(rep_msg->private_data, param->private_data,
2091 param->private_data_len);
2092}
2093
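/*
 * ib_send_cm_rep - send a connection reply (REP) for a received REQ.
 * Valid only in IB_CM_REQ_RCVD or IB_CM_MRA_REQ_SENT; on success the id
 * transitions to IB_CM_REP_SENT and the MAD layer retransmits the REP
 * until the RTU is processed or the send times out.
 *
 * Minimal usage sketch from a listen callback; the QP number, PSN and
 * resource values below are illustrative assumptions, not taken from
 * this file:
 *
 *	struct ib_cm_rep_param rep = {};
 *
 *	rep.qp_num = my_qp->qp_num;	// hypothetical QP
 *	rep.starting_psn = my_psn;	// hypothetical starting PSN
 *	rep.responder_resources = 4;
 *	rep.initiator_depth = 4;
 *	rep.rnr_retry_count = 7;
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */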
2094int ib_send_cm_rep(struct ib_cm_id *cm_id,
2095 struct ib_cm_rep_param *param)
2096{
2097 struct cm_id_private *cm_id_priv;
2098 struct ib_mad_send_buf *msg;
2099 struct cm_rep_msg *rep_msg;
2100 unsigned long flags;
2101 int ret;
2102
2103 if (param->private_data &&
2104 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2105 return -EINVAL;
2106
2107 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2108 spin_lock_irqsave(&cm_id_priv->lock, flags);
2109 if (cm_id->state != IB_CM_REQ_RCVD &&
2110 cm_id->state != IB_CM_MRA_REQ_SENT) {
2111 pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
2112 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2113 ret = -EINVAL;
2114 goto out;
2115 }
2116
2117 ret = cm_alloc_msg(cm_id_priv, &msg);
2118 if (ret)
2119 goto out;
2120
2121 rep_msg = (struct cm_rep_msg *) msg->mad;
2122 cm_format_rep(rep_msg, cm_id_priv, param);
2123 msg->timeout_ms = cm_id_priv->timeout_ms;
2124 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2125
2126 ret = ib_post_send_mad(msg, NULL);
2127 if (ret) {
2128 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2129 cm_free_msg(msg);
2130 return ret;
2131 }
2132
2133 cm_id->state = IB_CM_REP_SENT;
2134 cm_id_priv->msg = msg;
2135 cm_id_priv->initiator_depth = param->initiator_depth;
2136 cm_id_priv->responder_resources = param->responder_resources;
2137 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
2138 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2139
2140out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2141 return ret;
2142}
2143EXPORT_SYMBOL(ib_send_cm_rep);
2144
2145static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2146 struct cm_id_private *cm_id_priv,
2147 const void *private_data,
2148 u8 private_data_len)
2149{
2150 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2151 rtu_msg->local_comm_id = cm_id_priv->id.local_id;
2152 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
2153
2154 if (private_data && private_data_len)
2155 memcpy(rtu_msg->private_data, private_data, private_data_len);
2156}
2157
2158int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2159 const void *private_data,
2160 u8 private_data_len)
2161{
2162 struct cm_id_private *cm_id_priv;
2163 struct ib_mad_send_buf *msg;
2164 unsigned long flags;
2165 void *data;
2166 int ret;
2167
2168 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2169 return -EINVAL;
2170
2171 data = cm_copy_private_data(private_data, private_data_len);
2172 if (IS_ERR(data))
2173 return PTR_ERR(data);
2174
2175 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2176 spin_lock_irqsave(&cm_id_priv->lock, flags);
2177 if (cm_id->state != IB_CM_REP_RCVD &&
2178 cm_id->state != IB_CM_MRA_REP_SENT) {
2179 pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
2180 be32_to_cpu(cm_id->local_id), cm_id->state);
2181 ret = -EINVAL;
2182 goto error;
2183 }
2184
2185 ret = cm_alloc_msg(cm_id_priv, &msg);
2186 if (ret)
2187 goto error;
2188
2189 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2190 private_data, private_data_len);
2191
2192 ret = ib_post_send_mad(msg, NULL);
2193 if (ret) {
2194 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2195 cm_free_msg(msg);
2196 kfree(data);
2197 return ret;
2198 }
2199
2200 cm_id->state = IB_CM_ESTABLISHED;
2201 cm_set_private_data(cm_id_priv, data, private_data_len);
2202 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2203 return 0;
2204
2205error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2206 kfree(data);
2207 return ret;
2208}
2209EXPORT_SYMBOL(ib_send_cm_rtu);
2210
2211static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2212{
2213 struct cm_rep_msg *rep_msg;
2214 struct ib_cm_rep_event_param *param;
2215
2216 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2217 param = &work->cm_event.param.rep_rcvd;
2218 param->remote_ca_guid = rep_msg->local_ca_guid;
2219 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
2220 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2221 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
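	/* The remote's initiator depth is our responder resources, and vice versa. */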
2222 param->responder_resources = rep_msg->initiator_depth;
2223 param->initiator_depth = rep_msg->resp_resources;
2224 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2225 param->failover_accepted = cm_rep_get_failover(rep_msg);
2226 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
2227 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2228 param->srq = cm_rep_get_srq(rep_msg);
2229 work->cm_event.private_data = &rep_msg->private_data;
2230}
2231
2232static void cm_dup_rep_handler(struct cm_work *work)
2233{
2234 struct cm_id_private *cm_id_priv;
2235 struct cm_rep_msg *rep_msg;
2236 struct ib_mad_send_buf *msg = NULL;
2237 int ret;
2238
2239 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2240 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
2241 rep_msg->local_comm_id);
2242 if (!cm_id_priv)
2243 return;
2244
2245 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2246 counter[CM_REP_COUNTER]);
2247 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2248 if (ret)
2249 goto deref;
2250
2251 spin_lock_irq(&cm_id_priv->lock);
2252 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2253 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2254 cm_id_priv->private_data,
2255 cm_id_priv->private_data_len);
2256 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2257 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2258 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2259 cm_id_priv->private_data,
2260 cm_id_priv->private_data_len);
2261 else
2262 goto unlock;
2263 spin_unlock_irq(&cm_id_priv->lock);
2264
2265 ret = ib_post_send_mad(msg, NULL);
2266 if (ret)
2267 goto free;
2268 goto deref;
2269
2270unlock: spin_unlock_irq(&cm_id_priv->lock);
2271free: cm_free_msg(msg);
2272deref: cm_deref_id(cm_id_priv);
2273}
2274
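/*
 * Active-side REP handling: look up the cm_id by our local comm ID,
 * record the remote comm ID and QPN in the timewait info so duplicate
 * and stale REPs can be detected, cancel the outstanding REQ MAD, and
 * queue the event for the consumer.
 */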
2275static int cm_rep_handler(struct cm_work *work)
2276{
2277 struct cm_id_private *cm_id_priv;
2278 struct cm_rep_msg *rep_msg;
2279 int ret;
2280 struct cm_id_private *cur_cm_id_priv;
2281 struct ib_cm_id *cm_id;
2282 struct cm_timewait_info *timewait_info;
2283
2284 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2285 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
2286 if (!cm_id_priv) {
2287 cm_dup_rep_handler(work);
2288 pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
2289 be32_to_cpu(rep_msg->remote_comm_id));
2290 return -EINVAL;
2291 }
2292
2293 cm_format_rep_event(work, cm_id_priv->qp_type);
2294
2295 spin_lock_irq(&cm_id_priv->lock);
2296 switch (cm_id_priv->id.state) {
2297 case IB_CM_REQ_SENT:
2298 case IB_CM_MRA_REQ_RCVD:
2299 break;
2300 default:
2301 spin_unlock_irq(&cm_id_priv->lock);
2302 ret = -EINVAL;
2303 pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
2304 __func__, cm_id_priv->id.state,
2305 be32_to_cpu(rep_msg->local_comm_id),
2306 be32_to_cpu(rep_msg->remote_comm_id));
2307 goto error;
2308 }
2309
2310 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
2311 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
2312 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2313
2314 spin_lock(&cm.lock);
2315	/* Check for duplicate REP. */
2316 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2317 spin_unlock(&cm.lock);
2318 spin_unlock_irq(&cm_id_priv->lock);
2319 ret = -EINVAL;
2320 pr_debug("%s: Failed to insert remote id %d\n", __func__,
2321 be32_to_cpu(rep_msg->remote_comm_id));
2322 goto error;
2323 }
2324	/* Check for a stale connection. */
2325 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2326 if (timewait_info) {
2327 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2328 &cm.remote_id_table);
2329 cm_id_priv->timewait_info->inserted_remote_id = 0;
2330 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
2331 timewait_info->work.remote_id);
2332
2333 spin_unlock(&cm.lock);
2334 spin_unlock_irq(&cm_id_priv->lock);
2335 cm_issue_rej(work->port, work->mad_recv_wc,
2336 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2337 NULL, 0);
2338 ret = -EINVAL;
2339 pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
2340 __func__, be32_to_cpu(rep_msg->local_comm_id),
2341 be32_to_cpu(rep_msg->remote_comm_id));
2342
2343 if (cur_cm_id_priv) {
2344 cm_id = &cur_cm_id_priv->id;
2345 ib_send_cm_dreq(cm_id, NULL, 0);
2346 cm_deref_id(cur_cm_id_priv);
2347 }
2348
2349 goto error;
2350 }
2351 spin_unlock(&cm.lock);
2352
2353 cm_id_priv->id.state = IB_CM_REP_RCVD;
2354 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2355 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2356 cm_id_priv->initiator_depth = rep_msg->resp_resources;
2357 cm_id_priv->responder_resources = rep_msg->initiator_depth;
2358 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2359 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2360 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2361 cm_id_priv->av.timeout =
2362 cm_ack_timeout(cm_id_priv->target_ack_delay,
2363 cm_id_priv->av.timeout - 1);
2364 cm_id_priv->alt_av.timeout =
2365 cm_ack_timeout(cm_id_priv->target_ack_delay,
2366 cm_id_priv->alt_av.timeout - 1);
2367
2368	/* todo: handle peer_to_peer */
2369
2370 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2371 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2372 if (!ret)
2373 list_add_tail(&work->list, &cm_id_priv->work_list);
2374 spin_unlock_irq(&cm_id_priv->lock);
2375
2376 if (ret)
2377 cm_process_work(cm_id_priv, work);
2378 else
2379 cm_deref_id(cm_id_priv);
2380 return 0;
2381
2382error:
2383 cm_deref_id(cm_id_priv);
2384 return ret;
2385}
2386
2387static int cm_establish_handler(struct cm_work *work)
2388{
2389 struct cm_id_private *cm_id_priv;
2390 int ret;
2391
2392	/* See comment in cm_establish about lookup. */
2393 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2394 if (!cm_id_priv)
2395 return -EINVAL;
2396
2397 spin_lock_irq(&cm_id_priv->lock);
2398 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2399 spin_unlock_irq(&cm_id_priv->lock);
2400 goto out;
2401 }
2402
2403 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2404 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2405 if (!ret)
2406 list_add_tail(&work->list, &cm_id_priv->work_list);
2407 spin_unlock_irq(&cm_id_priv->lock);
2408
2409 if (ret)
2410 cm_process_work(cm_id_priv, work);
2411 else
2412 cm_deref_id(cm_id_priv);
2413 return 0;
2414out:
2415 cm_deref_id(cm_id_priv);
2416 return -EINVAL;
2417}
2418
2419static int cm_rtu_handler(struct cm_work *work)
2420{
2421 struct cm_id_private *cm_id_priv;
2422 struct cm_rtu_msg *rtu_msg;
2423 int ret;
2424
2425 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2426 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2427 rtu_msg->local_comm_id);
2428 if (!cm_id_priv)
2429 return -EINVAL;
2430
2431 work->cm_event.private_data = &rtu_msg->private_data;
2432
2433 spin_lock_irq(&cm_id_priv->lock);
2434 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2435 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2436 spin_unlock_irq(&cm_id_priv->lock);
2437 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2438 counter[CM_RTU_COUNTER]);
2439 goto out;
2440 }
2441 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2442
2443 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2444 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2445 if (!ret)
2446 list_add_tail(&work->list, &cm_id_priv->work_list);
2447 spin_unlock_irq(&cm_id_priv->lock);
2448
2449 if (ret)
2450 cm_process_work(cm_id_priv, work);
2451 else
2452 cm_deref_id(cm_id_priv);
2453 return 0;
2454out:
2455 cm_deref_id(cm_id_priv);
2456 return -EINVAL;
2457}
2458
2459static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2460 struct cm_id_private *cm_id_priv,
2461 const void *private_data,
2462 u8 private_data_len)
2463{
2464 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2465 cm_form_tid(cm_id_priv));
2466 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2467 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2468 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2469
2470 if (private_data && private_data_len)
2471 memcpy(dreq_msg->private_data, private_data, private_data_len);
2472}
2473
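/*
 * ib_send_cm_dreq - begins teardown of an established connection by
 * sending a DREQ.  Valid only in IB_CM_ESTABLISHED; if the DREQ cannot
 * be allocated or posted, the id drops straight into timewait so the
 * teardown still completes.
 */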
2474int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2475 const void *private_data,
2476 u8 private_data_len)
2477{
2478 struct cm_id_private *cm_id_priv;
2479 struct ib_mad_send_buf *msg;
2480 unsigned long flags;
2481 int ret;
2482
2483 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2484 return -EINVAL;
2485
2486 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2487 spin_lock_irqsave(&cm_id_priv->lock, flags);
2488 if (cm_id->state != IB_CM_ESTABLISHED) {
2489 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2490 be32_to_cpu(cm_id->local_id), cm_id->state);
2491 ret = -EINVAL;
2492 goto out;
2493 }
2494
2495 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2496 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2497 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2498
2499 ret = cm_alloc_msg(cm_id_priv, &msg);
2500 if (ret) {
2501 cm_enter_timewait(cm_id_priv);
2502 goto out;
2503 }
2504
2505 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2506 private_data, private_data_len);
2507 msg->timeout_ms = cm_id_priv->timeout_ms;
2508 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2509
2510 ret = ib_post_send_mad(msg, NULL);
2511 if (ret) {
2512 cm_enter_timewait(cm_id_priv);
2513 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2514 cm_free_msg(msg);
2515 return ret;
2516 }
2517
2518 cm_id->state = IB_CM_DREQ_SENT;
2519 cm_id_priv->msg = msg;
2520out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2521 return ret;
2522}
2523EXPORT_SYMBOL(ib_send_cm_dreq);
2524
2525static void cm_format_drep(struct cm_drep_msg *drep_msg,
2526 struct cm_id_private *cm_id_priv,
2527 const void *private_data,
2528 u8 private_data_len)
2529{
2530 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2531 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2532 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2533
2534 if (private_data && private_data_len)
2535 memcpy(drep_msg->private_data, private_data, private_data_len);
2536}
2537
2538int ib_send_cm_drep(struct ib_cm_id *cm_id,
2539 const void *private_data,
2540 u8 private_data_len)
2541{
2542 struct cm_id_private *cm_id_priv;
2543 struct ib_mad_send_buf *msg;
2544 unsigned long flags;
2545 void *data;
2546 int ret;
2547
2548 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2549 return -EINVAL;
2550
2551 data = cm_copy_private_data(private_data, private_data_len);
2552 if (IS_ERR(data))
2553 return PTR_ERR(data);
2554
2555 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2556 spin_lock_irqsave(&cm_id_priv->lock, flags);
2557 if (cm_id->state != IB_CM_DREQ_RCVD) {
2558 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2559 kfree(data);
2560 pr_debug("%s: local_id %d, cm_id->state (%d) != IB_CM_DREQ_RCVD\n",
2561 __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
2562 return -EINVAL;
2563 }
2564
2565 cm_set_private_data(cm_id_priv, data, private_data_len);
2566 cm_enter_timewait(cm_id_priv);
2567
2568 ret = cm_alloc_msg(cm_id_priv, &msg);
2569 if (ret)
2570 goto out;
2571
2572 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2573 private_data, private_data_len);
2574
2575 ret = ib_post_send_mad(msg, NULL);
2576 if (ret) {
2577 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2578 cm_free_msg(msg);
2579 return ret;
2580 }
2581
2582out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2583 return ret;
2584}
2585EXPORT_SYMBOL(ib_send_cm_drep);
2586
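/*
 * When a DREQ arrives for a connection we no longer track (the cm_id
 * was already destroyed), answer it directly from the received MAD so
 * the peer can finish its own teardown instead of retransmitting.
 */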
2587static int cm_issue_drep(struct cm_port *port,
2588 struct ib_mad_recv_wc *mad_recv_wc)
2589{
2590 struct ib_mad_send_buf *msg = NULL;
2591 struct cm_dreq_msg *dreq_msg;
2592 struct cm_drep_msg *drep_msg;
2593 int ret;
2594
2595 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2596 if (ret)
2597 return ret;
2598
2599 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2600 drep_msg = (struct cm_drep_msg *) msg->mad;
2601
2602 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2603 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2604 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2605
2606 ret = ib_post_send_mad(msg, NULL);
2607 if (ret)
2608 cm_free_msg(msg);
2609
2610 return ret;
2611}
2612
2613static int cm_dreq_handler(struct cm_work *work)
2614{
2615 struct cm_id_private *cm_id_priv;
2616 struct cm_dreq_msg *dreq_msg;
2617 struct ib_mad_send_buf *msg = NULL;
2618 int ret;
2619
2620 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2621 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2622 dreq_msg->local_comm_id);
2623 if (!cm_id_priv) {
2624 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2625 counter[CM_DREQ_COUNTER]);
2626 cm_issue_drep(work->port, work->mad_recv_wc);
2627 pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
2628 __func__, be32_to_cpu(dreq_msg->local_comm_id),
2629 be32_to_cpu(dreq_msg->remote_comm_id));
2630 return -EINVAL;
2631 }
2632
2633 work->cm_event.private_data = &dreq_msg->private_data;
2634
2635 spin_lock_irq(&cm_id_priv->lock);
2636 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2637 goto unlock;
2638
2639 switch (cm_id_priv->id.state) {
2640 case IB_CM_REP_SENT:
2641 case IB_CM_DREQ_SENT:
2642 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2643 break;
2644 case IB_CM_ESTABLISHED:
2645 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2646 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2647 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2648 break;
2649 case IB_CM_MRA_REP_RCVD:
2650 break;
2651 case IB_CM_TIMEWAIT:
2652 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2653 counter[CM_DREQ_COUNTER]);
2654 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2655 if (IS_ERR(msg))
2656 goto unlock;
2657
2658 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2659 cm_id_priv->private_data,
2660 cm_id_priv->private_data_len);
2661 spin_unlock_irq(&cm_id_priv->lock);
2662
2663 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2664 ib_post_send_mad(msg, NULL))
2665 cm_free_msg(msg);
2666 goto deref;
2667 case IB_CM_DREQ_RCVD:
2668 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2669 counter[CM_DREQ_COUNTER]);
2670 goto unlock;
2671 default:
2672 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2673 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2674 cm_id_priv->id.state);
2675 goto unlock;
2676 }
2677 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2678 cm_id_priv->tid = dreq_msg->hdr.tid;
2679 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2680 if (!ret)
2681 list_add_tail(&work->list, &cm_id_priv->work_list);
2682 spin_unlock_irq(&cm_id_priv->lock);
2683
2684 if (ret)
2685 cm_process_work(cm_id_priv, work);
2686 else
2687 cm_deref_id(cm_id_priv);
2688 return 0;
2689
2690unlock: spin_unlock_irq(&cm_id_priv->lock);
2691deref: cm_deref_id(cm_id_priv);
2692 return -EINVAL;
2693}
2694
2695static int cm_drep_handler(struct cm_work *work)
2696{
2697 struct cm_id_private *cm_id_priv;
2698 struct cm_drep_msg *drep_msg;
2699 int ret;
2700
2701 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2702 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2703 drep_msg->local_comm_id);
2704 if (!cm_id_priv)
2705 return -EINVAL;
2706
2707 work->cm_event.private_data = &drep_msg->private_data;
2708
2709 spin_lock_irq(&cm_id_priv->lock);
2710 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2711 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2712 spin_unlock_irq(&cm_id_priv->lock);
2713 goto out;
2714 }
2715 cm_enter_timewait(cm_id_priv);
2716
2717 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2718 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2719 if (!ret)
2720 list_add_tail(&work->list, &cm_id_priv->work_list);
2721 spin_unlock_irq(&cm_id_priv->lock);
2722
2723 if (ret)
2724 cm_process_work(cm_id_priv, work);
2725 else
2726 cm_deref_id(cm_id_priv);
2727 return 0;
2728out:
2729 cm_deref_id(cm_id_priv);
2730 return -EINVAL;
2731}
2732
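/*
 * ib_send_cm_rej - rejects a connection request or reply.  As the
 * switch below shows, rejecting at the REQ stage resets the id to
 * idle, while rejecting a sent REP moves it into timewait.
 */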
2733int ib_send_cm_rej(struct ib_cm_id *cm_id,
2734 enum ib_cm_rej_reason reason,
2735 void *ari,
2736 u8 ari_length,
2737 const void *private_data,
2738 u8 private_data_len)
2739{
2740 struct cm_id_private *cm_id_priv;
2741 struct ib_mad_send_buf *msg;
2742 unsigned long flags;
2743 int ret;
2744
2745 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2746 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2747 return -EINVAL;
2748
2749 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2750
2751 spin_lock_irqsave(&cm_id_priv->lock, flags);
2752 switch (cm_id->state) {
2753 case IB_CM_REQ_SENT:
2754 case IB_CM_MRA_REQ_RCVD:
2755 case IB_CM_REQ_RCVD:
2756 case IB_CM_MRA_REQ_SENT:
2757 case IB_CM_REP_RCVD:
2758 case IB_CM_MRA_REP_SENT:
2759 ret = cm_alloc_msg(cm_id_priv, &msg);
2760 if (!ret)
2761 cm_format_rej((struct cm_rej_msg *) msg->mad,
2762 cm_id_priv, reason, ari, ari_length,
2763 private_data, private_data_len);
2764
2765 cm_reset_to_idle(cm_id_priv);
2766 break;
2767 case IB_CM_REP_SENT:
2768 case IB_CM_MRA_REP_RCVD:
2769 ret = cm_alloc_msg(cm_id_priv, &msg);
2770 if (!ret)
2771 cm_format_rej((struct cm_rej_msg *) msg->mad,
2772 cm_id_priv, reason, ari, ari_length,
2773 private_data, private_data_len);
2774
2775 cm_enter_timewait(cm_id_priv);
2776 break;
2777 default:
2778 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2779 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2780 ret = -EINVAL;
2781 goto out;
2782 }
2783
2784 if (ret)
2785 goto out;
2786
2787 ret = ib_post_send_mad(msg, NULL);
2788 if (ret)
2789 cm_free_msg(msg);
2790
2791out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2792 return ret;
2793}
2794EXPORT_SYMBOL(ib_send_cm_rej);
2795
2796static void cm_format_rej_event(struct cm_work *work)
2797{
2798 struct cm_rej_msg *rej_msg;
2799 struct ib_cm_rej_event_param *param;
2800
2801 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2802 param = &work->cm_event.param.rej_rcvd;
2803 param->ari = rej_msg->ari;
2804 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2805 param->reason = __be16_to_cpu(rej_msg->reason);
2806 work->cm_event.private_data = &rej_msg->private_data;
2807}
2808
2809static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2810{
2811 struct cm_timewait_info *timewait_info;
2812 struct cm_id_private *cm_id_priv;
2813 __be32 remote_id;
2814
2815 remote_id = rej_msg->local_comm_id;
2816
2817 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2818 spin_lock_irq(&cm.lock);
2819 timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
2820 remote_id);
2821 if (!timewait_info) {
2822 spin_unlock_irq(&cm.lock);
2823 return NULL;
2824 }
2825 cm_id_priv = xa_load(&cm.local_id_table,
2826 cm_local_id(timewait_info->work.local_id));
2827 if (cm_id_priv) {
2828 if (cm_id_priv->id.remote_id == remote_id)
2829 atomic_inc(&cm_id_priv->refcount);
2830 else
2831 cm_id_priv = NULL;
2832 }
2833 spin_unlock_irq(&cm.lock);
2834 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2835 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2836 else
2837 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2838
2839 return cm_id_priv;
2840}
2841
2842static int cm_rej_handler(struct cm_work *work)
2843{
2844 struct cm_id_private *cm_id_priv;
2845 struct cm_rej_msg *rej_msg;
2846 int ret;
2847
2848 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2849 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2850 if (!cm_id_priv)
2851 return -EINVAL;
2852
2853 cm_format_rej_event(work);
2854
2855 spin_lock_irq(&cm_id_priv->lock);
2856 switch (cm_id_priv->id.state) {
2857 case IB_CM_REQ_SENT:
2858 case IB_CM_MRA_REQ_RCVD:
2859 case IB_CM_REP_SENT:
2860 case IB_CM_MRA_REP_RCVD:
2861 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2862		/* fall through */
2863 case IB_CM_REQ_RCVD:
2864 case IB_CM_MRA_REQ_SENT:
2865 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2866 cm_enter_timewait(cm_id_priv);
2867 else
2868 cm_reset_to_idle(cm_id_priv);
2869 break;
2870 case IB_CM_DREQ_SENT:
2871 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2872		/* fall through */
2873 case IB_CM_REP_RCVD:
2874 case IB_CM_MRA_REP_SENT:
2875 cm_enter_timewait(cm_id_priv);
2876 break;
2877 case IB_CM_ESTABLISHED:
2878 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2879 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2880 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2881 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2882 cm_id_priv->msg);
2883 cm_enter_timewait(cm_id_priv);
2884 break;
2885 }
2886		/* fall through */
2887 default:
2888 spin_unlock_irq(&cm_id_priv->lock);
2889 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2890 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2891 cm_id_priv->id.state);
2892 ret = -EINVAL;
2893 goto out;
2894 }
2895
2896 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2897 if (!ret)
2898 list_add_tail(&work->list, &cm_id_priv->work_list);
2899 spin_unlock_irq(&cm_id_priv->lock);
2900
2901 if (ret)
2902 cm_process_work(cm_id_priv, work);
2903 else
2904 cm_deref_id(cm_id_priv);
2905 return 0;
2906out:
2907 cm_deref_id(cm_id_priv);
2908 return -EINVAL;
2909}
2910
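/*
 * ib_send_cm_mra - sends a "message receipt acknowledged" (MRA) asking
 * the peer to extend its timeout.  If IB_CM_MRA_FLAG_DELAY is set in
 * service_timeout, nothing is sent now; the timeout is only recorded,
 * and an MRA goes out later if the peer retransmits (see the duplicate
 * handlers above).
 */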
2911int ib_send_cm_mra(struct ib_cm_id *cm_id,
2912 u8 service_timeout,
2913 const void *private_data,
2914 u8 private_data_len)
2915{
2916 struct cm_id_private *cm_id_priv;
2917 struct ib_mad_send_buf *msg;
2918 enum ib_cm_state cm_state;
2919 enum ib_cm_lap_state lap_state;
2920 enum cm_msg_response msg_response;
2921 void *data;
2922 unsigned long flags;
2923 int ret;
2924
2925 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2926 return -EINVAL;
2927
2928 data = cm_copy_private_data(private_data, private_data_len);
2929 if (IS_ERR(data))
2930 return PTR_ERR(data);
2931
2932 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2933
2934 spin_lock_irqsave(&cm_id_priv->lock, flags);
2935	switch (cm_id_priv->id.state) {
2936 case IB_CM_REQ_RCVD:
2937 cm_state = IB_CM_MRA_REQ_SENT;
2938 lap_state = cm_id->lap_state;
2939 msg_response = CM_MSG_RESPONSE_REQ;
2940 break;
2941 case IB_CM_REP_RCVD:
2942 cm_state = IB_CM_MRA_REP_SENT;
2943 lap_state = cm_id->lap_state;
2944 msg_response = CM_MSG_RESPONSE_REP;
2945 break;
2946 case IB_CM_ESTABLISHED:
2947 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2948 cm_state = cm_id->state;
2949 lap_state = IB_CM_MRA_LAP_SENT;
2950 msg_response = CM_MSG_RESPONSE_OTHER;
2951 break;
2952 }
2953		/* fall through */
2954 default:
2955 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2956 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2957 cm_id_priv->id.state);
2958 ret = -EINVAL;
2959 goto error1;
2960 }
2961
2962 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2963 ret = cm_alloc_msg(cm_id_priv, &msg);
2964 if (ret)
2965 goto error1;
2966
2967 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2968 msg_response, service_timeout,
2969 private_data, private_data_len);
2970 ret = ib_post_send_mad(msg, NULL);
2971 if (ret)
2972 goto error2;
2973 }
2974
2975 cm_id->state = cm_state;
2976 cm_id->lap_state = lap_state;
2977 cm_id_priv->service_timeout = service_timeout;
2978 cm_set_private_data(cm_id_priv, data, private_data_len);
2979 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2980 return 0;
2981
2982error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2983 kfree(data);
2984 return ret;
2985
2986error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2987 kfree(data);
2988 cm_free_msg(msg);
2989 return ret;
2990}
2991EXPORT_SYMBOL(ib_send_cm_mra);
2992
2993static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2994{
2995 switch (cm_mra_get_msg_mraed(mra_msg)) {
2996 case CM_MSG_RESPONSE_REQ:
2997 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2998 case CM_MSG_RESPONSE_REP:
2999 case CM_MSG_RESPONSE_OTHER:
3000 return cm_acquire_id(mra_msg->remote_comm_id,
3001 mra_msg->local_comm_id);
3002 default:
3003 return NULL;
3004 }
3005}
3006
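/*
 * On receipt of an MRA, stretch the timeout of our outstanding MAD by
 * the peer's advertised service timeout plus the ack timeout carried
 * in the address vector, rather than letting the send fail.
 */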
3007static int cm_mra_handler(struct cm_work *work)
3008{
3009 struct cm_id_private *cm_id_priv;
3010 struct cm_mra_msg *mra_msg;
3011 int timeout, ret;
3012
3013 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3014 cm_id_priv = cm_acquire_mraed_id(mra_msg);
3015 if (!cm_id_priv)
3016 return -EINVAL;
3017
3018 work->cm_event.private_data = &mra_msg->private_data;
3019 work->cm_event.param.mra_rcvd.service_timeout =
3020 cm_mra_get_service_timeout(mra_msg);
3021 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
3022 cm_convert_to_ms(cm_id_priv->av.timeout);
3023
3024 spin_lock_irq(&cm_id_priv->lock);
3025 switch (cm_id_priv->id.state) {
3026 case IB_CM_REQ_SENT:
3027 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
3028 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3029 cm_id_priv->msg, timeout))
3030 goto out;
3031 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3032 break;
3033 case IB_CM_REP_SENT:
3034 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
3035 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3036 cm_id_priv->msg, timeout))
3037 goto out;
3038 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3039 break;
3040 case IB_CM_ESTABLISHED:
3041 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
3042 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3043 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3044 cm_id_priv->msg, timeout)) {
3045 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3046 atomic_long_inc(&work->port->
3047 counter_group[CM_RECV_DUPLICATES].
3048 counter[CM_MRA_COUNTER]);
3049 goto out;
3050 }
3051 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3052 break;
3053 case IB_CM_MRA_REQ_RCVD:
3054 case IB_CM_MRA_REP_RCVD:
3055 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3056 counter[CM_MRA_COUNTER]);
3057		/* fall through */
3058 default:
3059 pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
3060 __func__, be32_to_cpu(cm_id_priv->id.local_id),
3061 cm_id_priv->id.state);
3062 goto out;
3063 }
3064
3065 cm_id_priv->msg->context[1] = (void *) (unsigned long)
3066 cm_id_priv->id.state;
3067 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3068 if (!ret)
3069 list_add_tail(&work->list, &cm_id_priv->work_list);
3070 spin_unlock_irq(&cm_id_priv->lock);
3071
3072 if (ret)
3073 cm_process_work(cm_id_priv, work);
3074 else
3075 cm_deref_id(cm_id_priv);
3076 return 0;
3077out:
3078 spin_unlock_irq(&cm_id_priv->lock);
3079 cm_deref_id(cm_id_priv);
3080 return -EINVAL;
3081}
3082
3083static void cm_format_lap(struct cm_lap_msg *lap_msg,
3084 struct cm_id_private *cm_id_priv,
3085 struct sa_path_rec *alternate_path,
3086 const void *private_data,
3087 u8 private_data_len)
3088{
3089 bool alt_ext = false;
3090
3091 if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA)
3092 alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
3093 alternate_path->opa.slid);
3094 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
3095 cm_form_tid(cm_id_priv));
3096 lap_msg->local_comm_id = cm_id_priv->id.local_id;
3097 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
3098 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
3099
3100 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
3101 lap_msg->alt_local_lid =
3102 htons(ntohl(sa_path_get_slid(alternate_path)));
3103 lap_msg->alt_remote_lid =
3104 htons(ntohl(sa_path_get_dlid(alternate_path)));
3105 lap_msg->alt_local_gid = alternate_path->sgid;
3106 lap_msg->alt_remote_gid = alternate_path->dgid;
3107 if (alt_ext) {
3108 lap_msg->alt_local_gid.global.interface_id
3109 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid));
3110 lap_msg->alt_remote_gid.global.interface_id
3111 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid));
3112 }
3113 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
3114 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
3115 lap_msg->alt_hop_limit = alternate_path->hop_limit;
3116 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
3117 cm_lap_set_sl(lap_msg, alternate_path->sl);
3118 cm_lap_set_subnet_local(lap_msg, 1);
3119 cm_lap_set_local_ack_timeout(lap_msg,
3120 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
3121 alternate_path->packet_life_time));
3122
3123 if (private_data && private_data_len)
3124 memcpy(lap_msg->private_data, private_data, private_data_len);
3125}
3126
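/*
 * ib_send_cm_lap - loads an alternate path (APM) on an established
 * connection.  Only legal while established with no other LAP in
 * flight (lap_state UNINIT or IDLE); the peer responds with an APR.
 */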
3127int ib_send_cm_lap(struct ib_cm_id *cm_id,
3128 struct sa_path_rec *alternate_path,
3129 const void *private_data,
3130 u8 private_data_len)
3131{
3132 struct cm_id_private *cm_id_priv;
3133 struct ib_mad_send_buf *msg;
3134 unsigned long flags;
3135 int ret;
3136
3137 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
3138 return -EINVAL;
3139
3140 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3141 spin_lock_irqsave(&cm_id_priv->lock, flags);
3142 if (cm_id->state != IB_CM_ESTABLISHED ||
3143 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
3144 cm_id->lap_state != IB_CM_LAP_IDLE)) {
3145 ret = -EINVAL;
3146 goto out;
3147 }
3148
3149 ret = cm_init_av_by_path(alternate_path, NULL, &cm_id_priv->alt_av,
3150 cm_id_priv);
3151 if (ret)
3152 goto out;
3153 cm_id_priv->alt_av.timeout =
3154 cm_ack_timeout(cm_id_priv->target_ack_delay,
3155 cm_id_priv->alt_av.timeout - 1);
3156
3157 ret = cm_alloc_msg(cm_id_priv, &msg);
3158 if (ret)
3159 goto out;
3160
3161 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
3162 alternate_path, private_data, private_data_len);
3163 msg->timeout_ms = cm_id_priv->timeout_ms;
3164 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
3165
3166 ret = ib_post_send_mad(msg, NULL);
3167 if (ret) {
3168 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3169 cm_free_msg(msg);
3170 return ret;
3171 }
3172
3173 cm_id->lap_state = IB_CM_LAP_SENT;
3174 cm_id_priv->msg = msg;
3175
3176out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3177 return ret;
3178}
3179EXPORT_SYMBOL(ib_send_cm_lap);
3180
3181static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3182 struct sa_path_rec *path)
3183{
3184 u32 lid;
3185
3186 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3187 sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
3188 sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
3189 } else {
3190 lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
3191 sa_path_set_dlid(path, lid);
3192
3193 lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
3194 sa_path_set_slid(path, lid);
3195 }
3196}
3197
3198static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3199 struct sa_path_rec *path,
3200 struct cm_lap_msg *lap_msg)
3201{
3202 path->dgid = lap_msg->alt_local_gid;
3203 path->sgid = lap_msg->alt_remote_gid;
3204 path->flow_label = cm_lap_get_flow_label(lap_msg);
3205 path->hop_limit = lap_msg->alt_hop_limit;
3206 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
3207 path->reversible = 1;
3208 path->pkey = cm_id_priv->pkey;
3209 path->sl = cm_lap_get_sl(lap_msg);
3210 path->mtu_selector = IB_SA_EQ;
3211 path->mtu = cm_id_priv->path_mtu;
3212 path->rate_selector = IB_SA_EQ;
3213 path->rate = cm_lap_get_packet_rate(lap_msg);
3214 path->packet_life_time_selector = IB_SA_EQ;
3215 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
3216 path->packet_life_time -= (path->packet_life_time > 0);
3217 cm_format_path_lid_from_lap(lap_msg, path);
3218}
3219
3220static int cm_lap_handler(struct cm_work *work)
3221{
3222 struct cm_id_private *cm_id_priv;
3223 struct cm_lap_msg *lap_msg;
3224 struct ib_cm_lap_event_param *param;
3225 struct ib_mad_send_buf *msg = NULL;
3226 int ret;
3227
3228	/* Currently Alternate path messages are not supported for
3229	 * RoCE link layer.
3230	 */
3231 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3232 work->port->port_num))
3233 return -EINVAL;
3234
3235	/* todo: verify LAP request and send reject APR if invalid. */
3236 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3237 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
3238 lap_msg->local_comm_id);
3239 if (!cm_id_priv)
3240 return -EINVAL;
3241
3242 param = &work->cm_event.param.lap_rcvd;
3243 memset(&work->path[0], 0, sizeof(work->path[0]));
3244 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3245 work->port->port_num,
3246 &work->path[0],
3247 &lap_msg->alt_local_gid);
3248 param->alternate_path = &work->path[0];
3249 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3250 work->cm_event.private_data = &lap_msg->private_data;
3251
3252 spin_lock_irq(&cm_id_priv->lock);
3253 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3254 goto unlock;
3255
3256 switch (cm_id_priv->id.lap_state) {
3257 case IB_CM_LAP_UNINIT:
3258 case IB_CM_LAP_IDLE:
3259 break;
3260 case IB_CM_MRA_LAP_SENT:
3261 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3262 counter[CM_LAP_COUNTER]);
3263 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3264 if (IS_ERR(msg))
3265 goto unlock;
3266
3267 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3268 CM_MSG_RESPONSE_OTHER,
3269 cm_id_priv->service_timeout,
3270 cm_id_priv->private_data,
3271 cm_id_priv->private_data_len);
3272 spin_unlock_irq(&cm_id_priv->lock);
3273
3274 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3275 ib_post_send_mad(msg, NULL))
3276 cm_free_msg(msg);
3277 goto deref;
3278 case IB_CM_LAP_RCVD:
3279 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3280 counter[CM_LAP_COUNTER]);
3281 goto unlock;
3282 default:
3283 goto unlock;
3284 }
3285
3286 ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3287 work->mad_recv_wc->recv_buf.grh,
3288 &cm_id_priv->av);
3289 if (ret)
3290 goto unlock;
3291
3292 ret = cm_init_av_by_path(param->alternate_path, NULL,
3293 &cm_id_priv->alt_av, cm_id_priv);
3294 if (ret)
3295 goto unlock;
3296
3297 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3298 cm_id_priv->tid = lap_msg->hdr.tid;
3299 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3300 if (!ret)
3301 list_add_tail(&work->list, &cm_id_priv->work_list);
3302 spin_unlock_irq(&cm_id_priv->lock);
3303
3304 if (ret)
3305 cm_process_work(cm_id_priv, work);
3306 else
3307 cm_deref_id(cm_id_priv);
3308 return 0;
3309
3310unlock: spin_unlock_irq(&cm_id_priv->lock);
3311deref: cm_deref_id(cm_id_priv);
3312 return -EINVAL;
3313}
3314
3315static void cm_format_apr(struct cm_apr_msg *apr_msg,
3316 struct cm_id_private *cm_id_priv,
3317 enum ib_cm_apr_status status,
3318 void *info,
3319 u8 info_length,
3320 const void *private_data,
3321 u8 private_data_len)
3322{
3323 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
3324 apr_msg->local_comm_id = cm_id_priv->id.local_id;
3325 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
3326 apr_msg->ap_status = (u8) status;
3327
3328 if (info && info_length) {
3329 apr_msg->info_length = info_length;
3330 memcpy(apr_msg->info, info, info_length);
3331 }
3332
3333 if (private_data && private_data_len)
3334 memcpy(apr_msg->private_data, private_data, private_data_len);
3335}
3336
3337int ib_send_cm_apr(struct ib_cm_id *cm_id,
3338 enum ib_cm_apr_status status,
3339 void *info,
3340 u8 info_length,
3341 const void *private_data,
3342 u8 private_data_len)
3343{
3344 struct cm_id_private *cm_id_priv;
3345 struct ib_mad_send_buf *msg;
3346 unsigned long flags;
3347 int ret;
3348
3349 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
3350 (info && info_length > IB_CM_APR_INFO_LENGTH))
3351 return -EINVAL;
3352
3353 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3354 spin_lock_irqsave(&cm_id_priv->lock, flags);
3355 if (cm_id->state != IB_CM_ESTABLISHED ||
3356 (cm_id->lap_state != IB_CM_LAP_RCVD &&
3357 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
3358 ret = -EINVAL;
3359 goto out;
3360 }
3361
3362 ret = cm_alloc_msg(cm_id_priv, &msg);
3363 if (ret)
3364 goto out;
3365
3366 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
3367 info, info_length, private_data, private_data_len);
3368 ret = ib_post_send_mad(msg, NULL);
3369 if (ret) {
3370 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3371 cm_free_msg(msg);
3372 return ret;
3373 }
3374
3375 cm_id->lap_state = IB_CM_LAP_IDLE;
3376out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3377 return ret;
3378}
3379EXPORT_SYMBOL(ib_send_cm_apr);
3380
3381static int cm_apr_handler(struct cm_work *work)
3382{
3383 struct cm_id_private *cm_id_priv;
3384 struct cm_apr_msg *apr_msg;
3385 int ret;
3386
3387	/* Currently Alternate path messages are not supported for
3388	 * RoCE link layer.
3389	 */
3390 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3391 work->port->port_num))
3392 return -EINVAL;
3393
3394 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3395 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
3396 apr_msg->local_comm_id);
3397 if (!cm_id_priv)
3398 return -EINVAL;
3399
3400 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
3401 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
3402 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
3403 work->cm_event.private_data = &apr_msg->private_data;
3404
3405 spin_lock_irq(&cm_id_priv->lock);
3406 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3407 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3408 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3409 spin_unlock_irq(&cm_id_priv->lock);
3410 goto out;
3411 }
3412 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3413 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3414 cm_id_priv->msg = NULL;
3415
3416 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3417 if (!ret)
3418 list_add_tail(&work->list, &cm_id_priv->work_list);
3419 spin_unlock_irq(&cm_id_priv->lock);
3420
3421 if (ret)
3422 cm_process_work(cm_id_priv, work);
3423 else
3424 cm_deref_id(cm_id_priv);
3425 return 0;
3426out:
3427 cm_deref_id(cm_id_priv);
3428 return -EINVAL;
3429}
3430
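/*
 * Runs when the timewait timer fires: drop the entry from the global
 * timewait list and, if the id is still in IB_CM_TIMEWAIT for the same
 * remote QPN, move it to IB_CM_IDLE and report the timewait exit.
 */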
3431static int cm_timewait_handler(struct cm_work *work)
3432{
3433 struct cm_timewait_info *timewait_info;
3434 struct cm_id_private *cm_id_priv;
3435 int ret;
3436
3437 timewait_info = (struct cm_timewait_info *)work;
3438 spin_lock_irq(&cm.lock);
3439 list_del(&timewait_info->list);
3440 spin_unlock_irq(&cm.lock);
3441
3442 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3443 timewait_info->work.remote_id);
3444 if (!cm_id_priv)
3445 return -EINVAL;
3446
3447 spin_lock_irq(&cm_id_priv->lock);
3448 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3449 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3450 spin_unlock_irq(&cm_id_priv->lock);
3451 goto out;
3452 }
3453 cm_id_priv->id.state = IB_CM_IDLE;
3454 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3455 if (!ret)
3456 list_add_tail(&work->list, &cm_id_priv->work_list);
3457 spin_unlock_irq(&cm_id_priv->lock);
3458
3459 if (ret)
3460 cm_process_work(cm_id_priv, work);
3461 else
3462 cm_deref_id(cm_id_priv);
3463 return 0;
3464out:
3465 cm_deref_id(cm_id_priv);
3466 return -EINVAL;
3467}
3468
3469static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3470 struct cm_id_private *cm_id_priv,
3471 struct ib_cm_sidr_req_param *param)
3472{
3473 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3474 cm_form_tid(cm_id_priv));
3475 sidr_req_msg->request_id = cm_id_priv->id.local_id;
3476 sidr_req_msg->pkey = param->path->pkey;
3477 sidr_req_msg->service_id = param->service_id;
3478
3479 if (param->private_data && param->private_data_len)
3480 memcpy(sidr_req_msg->private_data, param->private_data,
3481 param->private_data_len);
3482}
3483
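/*
 * ib_send_cm_sidr_req - sends a service ID resolution request, used to
 * learn the QPN and QKey bound to a service ID without forming a full
 * connection.  A minimal sketch of the parameters; the values below
 * are illustrative assumptions, not taken from this file:
 *
 *	struct ib_cm_sidr_req_param param = {};
 *
 *	param.path = &path_rec;		// previously resolved path record
 *	param.service_id = my_svc_id;	// hypothetical service ID
 *	param.timeout_ms = 1000;
 *	param.max_cm_retries = 3;
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 */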
3484int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3485 struct ib_cm_sidr_req_param *param)
3486{
3487 struct cm_id_private *cm_id_priv;
3488 struct ib_mad_send_buf *msg;
3489 unsigned long flags;
3490 int ret;
3491
3492 if (!param->path || (param->private_data &&
3493 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3494 return -EINVAL;
3495
3496 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3497 ret = cm_init_av_by_path(param->path, param->sgid_attr,
3498 &cm_id_priv->av,
3499 cm_id_priv);
3500 if (ret)
3501 goto out;
3502
3503 cm_id->service_id = param->service_id;
3504 cm_id->service_mask = ~cpu_to_be64(0);
3505 cm_id_priv->timeout_ms = param->timeout_ms;
3506 cm_id_priv->max_cm_retries = param->max_cm_retries;
3507 ret = cm_alloc_msg(cm_id_priv, &msg);
3508 if (ret)
3509 goto out;
3510
3511 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3512 param);
3513 msg->timeout_ms = cm_id_priv->timeout_ms;
3514 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3515
3516 spin_lock_irqsave(&cm_id_priv->lock, flags);
3517 if (cm_id->state == IB_CM_IDLE)
3518 ret = ib_post_send_mad(msg, NULL);
3519 else
3520 ret = -EINVAL;
3521
3522 if (ret) {
3523 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3524 cm_free_msg(msg);
3525 goto out;
3526 }
3527 cm_id->state = IB_CM_SIDR_REQ_SENT;
3528 cm_id_priv->msg = msg;
3529 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3530out:
3531 return ret;
3532}
3533EXPORT_SYMBOL(ib_send_cm_sidr_req);
3534
3535static void cm_format_sidr_req_event(struct cm_work *work,
3536 const struct cm_id_private *rx_cm_id,
3537 struct ib_cm_id *listen_id)
3538{
3539 struct cm_sidr_req_msg *sidr_req_msg;
3540 struct ib_cm_sidr_req_event_param *param;
3541
3542 sidr_req_msg = (struct cm_sidr_req_msg *)
3543 work->mad_recv_wc->recv_buf.mad;
3544 param = &work->cm_event.param.sidr_req_rcvd;
3545 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3546 param->listen_id = listen_id;
3547 param->service_id = sidr_req_msg->service_id;
3548 param->bth_pkey = cm_get_bth_pkey(work);
3549 param->port = work->port->port_num;
3550 param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3551 work->cm_event.private_data = &sidr_req_msg->private_data;
3552}
3553
3554static int cm_sidr_req_handler(struct cm_work *work)
3555{
3556 struct ib_cm_id *cm_id;
3557 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3558 struct cm_sidr_req_msg *sidr_req_msg;
3559 struct ib_wc *wc;
3560 int ret;
3561
3562 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3563 if (IS_ERR(cm_id))
3564 return PTR_ERR(cm_id);
3565 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3566
3567	/* Record SGID/SLID and request ID for lookup. */
3568 sidr_req_msg = (struct cm_sidr_req_msg *)
3569 work->mad_recv_wc->recv_buf.mad;
3570 wc = work->mad_recv_wc->wc;
3571 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3572 cm_id_priv->av.dgid.global.interface_id = 0;
3573 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3574 work->mad_recv_wc->recv_buf.grh,
3575 &cm_id_priv->av);
3576 if (ret)
3577 goto out;
3578
3579 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3580 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3581 atomic_inc(&cm_id_priv->work_count);
3582
3583 spin_lock_irq(&cm.lock);
3584 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3585 if (cur_cm_id_priv) {
3586 spin_unlock_irq(&cm.lock);
3587 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3588 counter[CM_SIDR_REQ_COUNTER]);
3589 goto out;
3590 }
3591 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3592 cur_cm_id_priv = cm_find_listen(cm_id->device,
3593 sidr_req_msg->service_id);
3594 if (!cur_cm_id_priv) {
3595 spin_unlock_irq(&cm.lock);
3596 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3597 goto out;
3598 }
3599 atomic_inc(&cur_cm_id_priv->refcount);
3600 atomic_inc(&cm_id_priv->refcount);
3601 spin_unlock_irq(&cm.lock);
3602
3603 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3604 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3605 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3606 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3607
3608 cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
3609 cm_process_work(cm_id_priv, work);
3610 cm_deref_id(cur_cm_id_priv);
3611 return 0;
3612out:
3613 ib_destroy_cm_id(&cm_id_priv->id);
3614 return -EINVAL;
3615}
3616
3617static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3618 struct cm_id_private *cm_id_priv,
3619 struct ib_cm_sidr_rep_param *param)
3620{
3621 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3622 cm_id_priv->tid);
3623 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3624 sidr_rep_msg->status = param->status;
3625 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3626 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3627 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3628
3629 if (param->info && param->info_length)
3630 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3631
3632 if (param->private_data && param->private_data_len)
3633 memcpy(sidr_rep_msg->private_data, param->private_data,
3634 param->private_data_len);
3635}
3636
3637int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3638 struct ib_cm_sidr_rep_param *param)
3639{
3640 struct cm_id_private *cm_id_priv;
3641 struct ib_mad_send_buf *msg;
3642 unsigned long flags;
3643 int ret;
3644
3645 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3646 (param->private_data &&
3647 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3648 return -EINVAL;
3649
3650 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3651 spin_lock_irqsave(&cm_id_priv->lock, flags);
3652 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3653 ret = -EINVAL;
3654 goto error;
3655 }
3656
3657 ret = cm_alloc_msg(cm_id_priv, &msg);
3658 if (ret)
3659 goto error;
3660
3661 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3662 param);
3663 ret = ib_post_send_mad(msg, NULL);
3664 if (ret) {
3665 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3666 cm_free_msg(msg);
3667 return ret;
3668 }
3669 cm_id->state = IB_CM_IDLE;
3670 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3671
3672 spin_lock_irqsave(&cm.lock, flags);
3673 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3674 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3675 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3676 }
3677 spin_unlock_irqrestore(&cm.lock, flags);
3678 return 0;
3679
3680error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3681 return ret;
3682}
3683EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3684
3685static void cm_format_sidr_rep_event(struct cm_work *work,
3686 const struct cm_id_private *cm_id_priv)
3687{
3688 struct cm_sidr_rep_msg *sidr_rep_msg;
3689 struct ib_cm_sidr_rep_event_param *param;
3690
3691 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3692 work->mad_recv_wc->recv_buf.mad;
3693 param = &work->cm_event.param.sidr_rep_rcvd;
3694 param->status = sidr_rep_msg->status;
3695 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3696 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3697 param->info = &sidr_rep_msg->info;
3698 param->info_len = sidr_rep_msg->info_length;
3699 param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3700 work->cm_event.private_data = &sidr_rep_msg->private_data;
3701}
3702
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work, cm_id_priv);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

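/*
 * A send completed in error.  If the MAD is still the latest outstanding
 * send for its cm_id, roll the state machine back and report the matching
 * *_ERROR event to the consumer; otherwise silently discard it.
 */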
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
			     state, ib_wc_status_msg(wc_status));
	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}

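/*
 * Send completion handler for the MAD agent: update the per-port transmit
 * and retry counters, then free the message or, on failure, hand it to
 * cm_process_send_error().
 */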
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries,
			&port->counter_group[CM_XMIT].counter[attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counter_group[CM_XMIT_RETRIES].
				counter[attr_index]);

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}

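/* Work queue handler: dispatch a queued event to its CM state handler. */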
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

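/*
 * The consumer observed traffic on the QP before the RTU arrived
 * (IB_EVENT_COMM_EST).  Move the connection to ESTABLISHED here and queue
 * an IB_CM_USER_ESTABLISHED event so the rest of the transition runs in
 * the CM work queue.
 */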
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id->local_id), cm_id->state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item.  To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

out:
	return ret;
}

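/*
 * The hardware completed path migration (IB_EVENT_PATH_MIG): make the
 * alternate path the new primary by swapping the address vectors and the
 * per-port send-ready state.
 */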
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_av tmp_av;
	unsigned long flags;
	int tmp_send_port_not_ready;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		/* Swap address vector */
		tmp_av = cm_id_priv->av;
		cm_id_priv->av = cm_id_priv->alt_av;
		cm_id_priv->alt_av = tmp_av;
		/* Swap port send ready state */
		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
		cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

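/*
 * Consumers forward asynchronous QP events here so the CM can track state
 * it cannot observe directly.  A typical caller is a QP event handler
 * (a sketch only; the handler name and context layout are illustrative):
 *
 *	static void qp_event_handler(struct ib_event *event, void *context)
 *	{
 *		struct ib_cm_id *cm_id = context;
 *
 *		if (event->event == IB_EVENT_COMM_EST ||
 *		    event->event == IB_EVENT_PATH_MIG)
 *			ib_cm_notify(cm_id, event->event);
 *	}
 */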
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);

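/*
 * Receive handler for the MAD agent: map the CM attribute ID to an event,
 * bump the receive counter, and queue the work unless the device has
 * started its remove_one.
 */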
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	bool alt_path = false;
	u16 attr_id;
	int paths = 0;
	int going_down = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
						mad_recv_wc->recv_buf.mad);
		paths = 1 + (alt_path != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counter_group[CM_RECV].
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}

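/* Fill in the attributes needed to transition a QP to the INIT state. */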
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

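/* Fill in the attributes needed to transition a QP to the RTR state. */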
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

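/* Fill in the attributes needed to transition a QP to the RTS state. */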
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:

	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
				/* fall through */
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

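/*
 * ib_cm_init_qp_attr - Fill in qp_attr and qp_attr_mask for the transition
 * named by qp_attr->qp_state, based on the current connection state.
 * Typical usage (a sketch only; error handling elided):
 *
 *	struct ib_qp_attr qp_attr = { .qp_state = IB_QPS_RTR };
 *	int qp_attr_mask;
 *
 *	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */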
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

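/* sysfs show() callback for a single CM MAD counter. */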
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

static char *cm_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
	.owner   = THIS_MODULE,
	.name    = "infiniband_cm",
	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);

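/* Register the per-port counter groups with the port's sysfs tree. */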
static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = ib_port_register_module_stat(port->cm_dev->ib_device,
						   port->port_num,
						   &port->counter_group[i].obj,
						   &cm_counter_obj_type,
						   counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
}

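/*
 * IB client add callback: for every CM-capable port, register a MAD agent
 * on the GSI QP and advertise IB_PORT_CM_SUP.
 */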
static void cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u8 i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port)
			goto error1;

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		INIT_LIST_HEAD(&port->cm_priv_prim_list);
		INIT_LIST_HEAD(&port->cm_priv_altr_list);

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
	}
free:
	kfree(cm_dev);
}

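/*
 * IB client remove callback: mark the device as going down so no new work
 * is queued, then drain the work queue and unregister the per-port MAD
 * agents.
 */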
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);

		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);
		/*
		 * Flush the work queue after going_down is set: this
		 * guarantees that no new work will be queued by the receive
		 * handler before we unregister the MAD agent below.
		 */
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
	}

	kfree(cm_dev);
}

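/*
 * Module init: set up the global CM tables and work queue, then register
 * as an IB client.
 */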
static int __init ib_cm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret)
		goto error1;

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	return ret;
}

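/*
 * Module exit: cancel pending timewait work, unregister from the IB core,
 * and free anything still on the timewait list.
 */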
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);