/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses: the GNU General Public License (GPL) Version 2, or the
 * OpenIB.org BSD license.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("qp event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %s (%d) on device %s port %d\n",
		 ib_event_msg(event->event), event->event,
		 event->device->name, event->element.port_num);
}

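/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adaptor.
 *
 * Returns 0 on success, -1 on failure
 */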
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	int ret, i, max_cqe;

	ret = iser_assign_reg_ops(device);
	if (ret)
		return ret;

	device->comps_used = min_t(int, num_online_cpus(),
				   ib_dev->num_comp_vectors);

	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
				GFP_KERNEL);
	if (!device->comps)
		goto comps_err;

	max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);

	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, ib_dev->name,
		  ib_dev->num_comp_vectors, max_cqe);

	device->pd = ib_alloc_pd(ib_dev,
		iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i,
				       IB_POLL_SOFTIRQ);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}
	}

	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
			      iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto cq_err;

	return 0;

cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_free_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->comps);
comps_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

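/**
 * iser_free_device_ib_res - destroys the CQs, unregisters the async event
 * handler and deallocates the PD created for the device.
 */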
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		ib_free_cq(comp->cq);
		comp->cq = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	ib_dealloc_pd(device->pd);

	kfree(device->comps);
	device->comps = NULL;
	device->pd = NULL;
}

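/**
 * iser_alloc_fmr_pool - Creates FMR pool and page_vector
 *
 * Returns 0 on success, or errno code on failure
 */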
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
			unsigned cmds_max,
			unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_page_vec *page_vec;
	struct iser_fr_desc *desc;
	struct ib_fmr_pool *fmr_pool;
	struct ib_fmr_pool_param params;
	int ret;

	INIT_LIST_HEAD(&fr_pool->list);
	spin_lock_init(&fr_pool->lock);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
			   GFP_KERNEL);
	if (!page_vec) {
		ret = -ENOMEM;
		goto err_frpl;
	}

	page_vec->pages = (u64 *)(page_vec + 1);

	params.page_shift = SHIFT_4K;
	params.max_pages_per_fmr = size;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for flush enables
	 * the 1/2 of the pool free */
	params.pool_size = cmds_max * 2;
	params.dirty_watermark = cmds_max;
	params.cache = 0;
	params.flush_function = NULL;
	params.access = (IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_READ);

	fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(fmr_pool)) {
		ret = PTR_ERR(fmr_pool);
		iser_err("FMR allocation failed, err %d\n", ret);
		goto err_fmr;
	}

	desc->rsc.page_vec = page_vec;
	desc->rsc.fmr_pool = fmr_pool;
	list_add(&desc->list, &fr_pool->list);

	return 0;

err_fmr:
	kfree(page_vec);
err_frpl:
	kfree(desc);

	return ret;
}

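/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */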
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;

	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);

	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, desc->rsc.fmr_pool);

	ib_destroy_fmr_pool(desc->rsc.fmr_pool);
	kfree(desc->rsc.page_vec);
	kfree(desc);
}

static int
iser_alloc_reg_res(struct iser_device *device,
		   struct ib_pd *pd,
		   struct iser_reg_resources *res,
		   unsigned int size)
{
	struct ib_device *ib_dev = device->ib_device;
	enum ib_mr_type mr_type;
	int ret;

	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	res->mr = ib_alloc_mr(pd, mr_type, size);
	if (IS_ERR(res->mr)) {
		ret = PTR_ERR(res->mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		return ret;
	}
	res->mr_valid = 0;

	return 0;
}

static void
iser_free_reg_res(struct iser_reg_resources *rsc)
{
	ib_dereg_mr(rsc->mr);
}

static int
iser_alloc_pi_ctx(struct iser_device *device,
		  struct ib_pd *pd,
		  struct iser_fr_desc *desc,
		  unsigned int size)
{
	struct iser_pi_context *pi_ctx = NULL;
	int ret;

	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!desc->pi_ctx)
		return -ENOMEM;

	pi_ctx = desc->pi_ctx;

	ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
	if (ret) {
		iser_err("failed to allocate reg_resources\n");
		goto alloc_reg_res_err;
	}

	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto sig_mr_failure;
	}
	pi_ctx->sig_mr_valid = 0;
	desc->pi_ctx->sig_protected = 0;

	return 0;

sig_mr_failure:
	iser_free_reg_res(&pi_ctx->rsc);
alloc_reg_res_err:
	kfree(desc->pi_ctx);

	return ret;
}

static void
iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
{
	iser_free_reg_res(&pi_ctx->rsc);
	ib_dereg_mr(pi_ctx->sig_mr);
	kfree(pi_ctx);
}

static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
			 struct ib_pd *pd,
			 bool pi_enable,
			 unsigned int size)
{
	struct iser_fr_desc *desc;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
	if (ret)
		goto reg_res_alloc_failure;

	if (pi_enable) {
		ret = iser_alloc_pi_ctx(device, pd, desc, size);
		if (ret)
			goto pi_ctx_alloc_failure;
	}

	return desc;

pi_ctx_alloc_failure:
	iser_free_reg_res(&desc->rsc);
reg_res_alloc_failure:
	kfree(desc);

	return ERR_PTR(ret);
}

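/**
 * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 *
 * Returns 0 on success, or errno code on failure
 */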
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	int i, ret;

	INIT_LIST_HEAD(&fr_pool->list);
	spin_lock_init(&fr_pool->lock);
	fr_pool->size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = iser_create_fastreg_desc(device, device->pd,
						ib_conn->pi_support, size);
		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto err;
		}

		list_add_tail(&desc->list, &fr_pool->list);
		fr_pool->size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

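/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */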
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc, *tmp;
	int i = 0;

	if (list_empty(&fr_pool->list))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
		list_del(&desc->list);
		iser_free_reg_res(&desc->rsc);
		if (desc->pi_ctx)
			iser_free_pi_ctx(desc->pi_ctx);
		kfree(desc);
		++i;
	}

	if (i < fr_pool->size)
		iser_warn("pool still has %d regions registered\n",
			  fr_pool->size - i);
}

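/**
 * iser_create_ib_conn_res - creates the Queue-Pair (QP) for the connection
 * on the completion context with the fewest active QPs.
 *
 * Returns 0 on success, errno code on failure
 */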
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_device *device;
	struct ib_device *ib_dev;
	struct ib_qp_init_attr init_attr;
	int ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	ib_dev = device->ib_device;

	memset(&init_attr, 0, sizeof(init_attr));

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context = (void *)ib_conn;
	init_attr.send_cq = ib_conn->comp->cq;
	init_attr.recv_cq = ib_conn->comp->cq;
	init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (ib_dev->attrs.max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = ib_dev->attrs.max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name,
				 ib_dev->attrs.max_qp_wr);
		}
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	mutex_lock(&ig.connlist_mutex);
	ib_conn->comp->active_qps--;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}

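/*
 * Based on the resolved device node GUID see if there is an already
 * allocated device. If there is no such device, create one.
 */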
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

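/* if there's no demand for this device, release it */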
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

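/**
 * Called with state mutex held
 **/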
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}

void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

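/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *     iser device and memory regions pool (only iscsi
 *     shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */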
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);

		if (device != NULL) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}

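/**
 * Frees all conn objects and deallocs conn descriptor
 */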
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	/* the state might not have progressed to DOWN on error flows */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

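/**
 * triggers start of the disconnect procedures and wait for them to be done
 * (returns 1 if the caller may safely free all iser connection resources)
 */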
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* block until all flush errors are consumed */
		ib_drain_sq(ib_conn->qp);
	}

	return 1;
}

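/**
 * Called with state mutex held
 **/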
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_TERMINATING;
}

static void
iser_calc_scsi_params(struct iser_conn *iser_conn,
		      unsigned int max_sectors)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	unsigned short sg_tablesize, sup_sg_tablesize;

	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
	sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
				 device->ib_device->attrs.max_fast_reg_page_list_len);

	if (sg_tablesize > sup_sg_tablesize) {
		sg_tablesize = sup_sg_tablesize;
		iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
	} else {
		iser_conn->scsi_max_sectors = max_sectors;
	}

	iser_conn->scsi_sg_tablesize = sg_tablesize;

	iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
		 iser_conn, iser_conn->scsi_sg_tablesize,
		 iser_conn->scsi_max_sectors);
}

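/**
 * Called with state mutex held
 **/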
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn *iser_conn;
	struct ib_conn *ib_conn;
	int ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->ib_device->attrs.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	iser_calc_scsi_params(iser_conn, iser_max_sectors);

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

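/**
 * Called with state mutex held
 **/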
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 7;
	conn_param.rnr_retry_count = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = ISER_ZBVA_NOT_SUP;
	if (!device->remote_inv_sup)
		req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
	conn_param.private_data = (void *)&req_hdr;
	conn_param.private_data_len = sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id,
				   const void *private_data)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n",
		  attr.dest_qp_num, cma_id->qp->qp_num);

	if (private_data) {
		u8 flags = *(u8 *)private_data;

		iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP);
	}

	iser_info("conn %p: negotiated %s invalidation\n",
		  iser_conn, iser_conn->snd_w_inv ? "remote" : "local");

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}

static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
}

static int iser_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id, event->param.conn.private_data);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also if we are not in state DOWN implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event: %s (%d)\n",
			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

void iser_conn_init(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	iser_conn->state = ISER_CONN_INIT;
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	mutex_init(&iser_conn->state_mutex);

	ib_conn->post_recv_buf_count = 0;
	ib_conn->reg_cqe.done = iser_reg_comp;
}

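/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */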
int iser_connect(struct iser_conn *iser_conn,
		 struct sockaddr *src_addr,
		 struct sockaddr *dst_addr,
		 int non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}

int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_login_desc *desc = &iser_conn->login_desc;
	struct ib_recv_wr wr, *wr_failed;
	int ib_ret;

	desc->sge.addr = desc->rsp_dma;
	desc->sge.length = ISER_RX_LOGIN_SIZE;
	desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;

	desc->cqe.done = iser_login_rsp;
	wr.wr_cqe = &desc->cqe;
	wr.sg_list = &desc->sge;
	wr.num_sge = 1;
	wr.next = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &wr, &wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}

	return ib_ret;
}

int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;
	struct ib_recv_wr *wr, *wr_failed;
	int i, ib_ret;

	for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
		rx_desc = &iser_conn->rx_descs[my_rx_head];
		rx_desc->cqe.done = iser_task_rsp;
		wr->wr_cqe = &rx_desc->cqe;
		wr->sg_list = &rx_desc->rx_sg;
		wr->num_sge = 1;
		wr->next = wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	wr--;
	wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;

	return ib_ret;
}

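/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * Returns 0 on success, errno code on failure
 */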
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
	int ib_ret;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	wr->next = NULL;
	wr->wr_cqe = &tx_desc->cqe;
	wr->sg_list = tx_desc->tx_sg;
	wr->num_sge = tx_desc->num_sge;
	wr->opcode = IB_WR_SEND;
	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;

	ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
			 ib_ret, bad_wr->opcode);

	return ib_ret;
}

u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->pi_ctx->sig_protected) {
		desc->pi_ctx->sig_protected = 0;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			sector_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}

void iser_err_comp(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR) {
		struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);

		iser_err("%s failure: %s (%d) vend_err %x\n", type,
			 ib_wc_status_msg(wc->status), wc->status,
			 wc->vendor_err);

		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
	} else {
		iser_dbg("%s failure: %s (%d)\n", type,
			 ib_wc_status_msg(wc->status), wc->status);
	}
}