/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");

#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

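/* The get/set methods for the three timeout parameters below; defined after
 * srp_tmo_get() and srp_tmo_set(). */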
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static bool srp_use_imm_data = true;
module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
MODULE_PARM_DESC(use_imm_data,
		 "Whether or not to request permission to use immediate data during SRP login.");

static unsigned int srp_max_imm_data = 8 * 1024;
module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
			      const char *opname);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
			     const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one,
	.rename = srp_rename_dev
};

static struct ib_sa_client srp_sa_client;

static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sysfs_emit(buffer, "%d\n", tmo);
	else
		return sysfs_emit(buffer, "off\n");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_ib_qp(struct srp_target_port *target,
			  struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->ib_cm.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_ib_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->ib_cm.cm_id)
		ib_destroy_cm_id(ch->ib_cm.cm_id);
	ch->ib_cm.cm_id = new_cm_id;
	if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
			    target->srp_host->port))
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
	else
		ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
	ch->ib_cm.path.sgid = target->sgid;
	ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
	ch->ib_cm.path.pkey = target->ib_cm.pkey;
	ch->ib_cm.path.service_id = target->ib_cm.service_id;

	return 0;
}

static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct rdma_cm_id *new_cm_id;
	int ret;

	new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		new_cm_id = NULL;
		goto out;
	}

	init_completion(&ch->done);
	ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
				&target->rdma_cm.src.sa : NULL,
				&target->rdma_cm.dst.sa,
				SRP_PATH_REC_TIMEOUT_MS);
	if (ret) {
		pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
		       &target->rdma_cm.src, &target->rdma_cm.dst, ret);
		goto out;
	}
	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		goto out;

	ret = ch->status;
	if (ret) {
		pr_err("Resolving address %pISpsc failed (%d)\n",
		       &target->rdma_cm.dst, ret);
		goto out;
	}

	swap(ch->rdma_cm.cm_id, new_cm_id);

out:
	if (new_cm_id)
		rdma_destroy_id(new_cm_id);

	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
		srp_new_ib_cm_id(ch);
}

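/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */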
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

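/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */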
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;
	enum ib_mr_type mr_type;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

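/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */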
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

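/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */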
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

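/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 *
 * Drain the qp before destroying it.  This avoids that the receive
 * completion handler can access the queue pair while it is being
 * destroyed.
 */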
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
	spin_lock_irq(&ch->lock);
	ib_process_cq_direct(ch->send_cq, -1);
	spin_unlock_irq(&ch->lock);

	ib_drain_qp(ch->qp);
	ib_destroy_qp(ch->qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	const struct ib_device_attr *attr = &dev->dev->attrs;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

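	/* queue_size + 1 for ib_drain_rq() */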
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
			      ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
			      ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = min(SRP_MAX_SGE, attr->max_send_sge);
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);

	if (target->using_rdma_cm) {
		ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
		qp = ch->rdma_cm.cm_id->qp;
	} else {
		qp = ib_create_qp(dev->pd, init_attr);
		if (!IS_ERR(qp)) {
			ret = srp_init_ib_qp(target, qp);
			if (ret)
				ib_destroy_qp(qp);
		} else {
			ret = PTR_ERR(qp);
		}
	}
	if (ret) {
		pr_err("QP creation failed for dev %s: %d\n",
		       dev_name(&dev->dev->dev), ret);
		goto err_send_cq;
	}

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	if (target->using_rdma_cm)
		rdma_destroy_qp(ch->rdma_cm.cm_id);
	else
		ib_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

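/**
 * srp_free_ch_ib() - release a channel's IB resources
 * @target: SRP target port.
 * @ch: SRP RDMA channel.
 */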
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (target->using_rdma_cm) {
		if (ch->rdma_cm.cm_id) {
			rdma_destroy_id(ch->rdma_cm.cm_id);
			ch->rdma_cm.cm_id = NULL;
		}
	} else {
		if (ch->ib_cm.cm_id) {
			ib_destroy_cm_id(ch->ib_cm.cm_id);
			ch->ib_cm.cm_id = NULL;
		}
	}

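	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */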
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	}

	srp_destroy_qp(ch);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

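	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */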
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->ib_cm.path = *pathrec;
	complete(&ch->done);
}

static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->ib_cm.path.numb_path = 1;

	init_completion(&ch->done);

	ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					target->srp_host->srp_dev->dev,
					target->srp_host->port,
					&ch->ib_cm.path,
					IB_SA_PATH_REC_SERVICE_ID |
					IB_SA_PATH_REC_DGID	  |
					IB_SA_PATH_REC_SGID	  |
					IB_SA_PATH_REC_NUMB_PATH  |
					IB_SA_PATH_REC_PKEY,
					SRP_PATH_REC_TIMEOUT_MS,
					GFP_KERNEL,
					srp_path_rec_completion,
					ch, &ch->ib_cm.path_query);
	if (ch->ib_cm.path_query_id < 0)
		return ch->ib_cm.path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
			     ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
			     be16_to_cpu(target->ib_cm.pkey),
			     be64_to_cpu(target->ib_cm.service_id));

	return ch->status;
}

static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	init_completion(&ch->done);

	ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
	if (ret)
		return ret;

	wait_for_completion_interruptible(&ch->done);

	if (ch->status != 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path resolution failed\n");

	return ch->status;
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;

	return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
		srp_ib_lookup_path(ch);
}

static u8 srp_get_subnet_timeout(struct srp_host *host)
{
	struct ib_port_attr attr;
	int ret;
	u8 subnet_timeout = 18;

	ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
	if (ret == 0)
		subnet_timeout = attr.subnet_timeout;

	if (unlikely(subnet_timeout < 15))
		pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
			dev_name(&host->srp_dev->dev->dev), subnet_timeout);

	return subnet_timeout;
}

static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct rdma_conn_param	  rdma_param;
		struct srp_login_req_rdma rdma_req;
		struct ib_cm_req_param	  ib_param;
		struct srp_login_req	  ib_req;
	} *req = NULL;
	char *ipi, *tpi;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->ib_param.flow_control = 1;
	req->ib_param.retry_count = target->tl_retry_count;

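	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */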
	req->ib_param.responder_resources = 4;
	req->ib_param.rnr_retry_count = 7;
	req->ib_param.max_cm_retries = 15;

	req->ib_req.opcode = SRP_LOGIN_REQ;
	req->ib_req.tag = 0;
	req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
	req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
				 SRP_MULTICHAN_SINGLE);
	if (srp_use_imm_data) {
		req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
		req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
	}

	if (target->using_rdma_cm) {
		req->rdma_param.flow_control = req->ib_param.flow_control;
		req->rdma_param.responder_resources =
			req->ib_param.responder_resources;
		req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
		req->rdma_param.retry_count = req->ib_param.retry_count;
		req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
		req->rdma_param.private_data = &req->rdma_req;
		req->rdma_param.private_data_len = sizeof(req->rdma_req);

		req->rdma_req.opcode = req->ib_req.opcode;
		req->rdma_req.tag = req->ib_req.tag;
		req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
		req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
		req->rdma_req.req_flags	= req->ib_req.req_flags;
		req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;

		ipi = req->rdma_req.initiator_port_id;
		tpi = req->rdma_req.target_port_id;
	} else {
		u8 subnet_timeout;

		subnet_timeout = srp_get_subnet_timeout(target->srp_host);

		req->ib_param.primary_path = &ch->ib_cm.path;
		req->ib_param.alternate_path = NULL;
		req->ib_param.service_id = target->ib_cm.service_id;
		get_random_bytes(&req->ib_param.starting_psn, 4);
		req->ib_param.starting_psn &= 0xffffff;
		req->ib_param.qp_num = ch->qp->qp_num;
		req->ib_param.qp_type = ch->qp->qp_type;
		req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
		req->ib_param.private_data = &req->ib_req;
		req->ib_param.private_data_len = sizeof(req->ib_req);

		ipi = req->ib_req.initiator_port_id;
		tpi = req->ib_req.target_port_id;
	}

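	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */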
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(ipi,     &target->sgid.global.interface_id, 8);
		memcpy(ipi + 8, &target->initiator_ext, 8);
		memcpy(tpi,     &target->ioc_guid, 8);
		memcpy(tpi + 8, &target->id_ext, 8);
	} else {
		memcpy(ipi,     &target->initiator_ext, 8);
		memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
		memcpy(tpi,     &target->id_ext, 8);
		memcpy(tpi + 8, &target->ioc_guid, 8);
	}

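	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */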
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(ipi, 0, 8);
		memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	if (target->using_rdma_cm)
		status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
	else
		status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i, ret;

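	/* XXX should send SRP_I_LOGOUT request */
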
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		ret = 0;
		if (target->using_rdma_cm) {
			if (ch->rdma_cm.cm_id)
				rdma_disconnect(ch->rdma_cm.cm_id);
		} else {
			if (ch->ib_cm.cm_id)
				ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
						      NULL, 0);
		}
		if (ret < 0) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req = scsi_cmd_priv(cmd);

	kfree(req->fr_list);
	if (req->indirect_dma_addr) {
		ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
				    target->indirect_size,
				    DMA_TO_DEVICE);
	}
	kfree(req->indirect_desc);

	return 0;
}

static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req = scsi_cmd_priv(cmd);
	dma_addr_t dma_addr;
	int ret = -ENOMEM;

	if (srp_dev->use_fast_reg) {
		req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
					     GFP_KERNEL);
		if (!req->fr_list)
			goto out;
	}
	req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
	if (!req->indirect_desc)
		goto out;

	dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
				     target->indirect_size,
				     DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, dma_addr)) {
		srp_exit_cmd_priv(shost, cmd);
		goto out;
	}

	req->indirect_dma_addr = dma_addr;
	ret = 0;

out:
	return ret;
}

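/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */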
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

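/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */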
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
			  bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, max_iu_len, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

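		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */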
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}

static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
			u32 rkey)
{
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, NULL);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

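/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */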
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

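/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to ch->req_lim.
 */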
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

struct srp_terminate_context {
	struct srp_target_port *srp_target;
	int scsi_result;
};

static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr,
			      bool reserved)
{
	struct srp_terminate_context *context = context_ptr;
	struct srp_target_port *target = context->srp_target;
	u32 tag = blk_mq_unique_tag(scmnd->request);
	struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	struct srp_request *req = scsi_cmd_priv(scmnd);

	srp_finish_req(ch, req, NULL, context->scsi_result);

	return true;
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_terminate_context context = { .srp_target = target,
		.scsi_result = DID_TRANSPORT_FAILFAST << 16 };

	scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
}

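/* Calculate maximum initiator to target information unit length. */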
static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
				  uint32_t max_it_iu_size)
{
	uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
		sizeof(struct srp_indirect_buf) +
		cmd_sg_cnt * sizeof(struct srp_direct_buf);

	if (use_imm_data)
		max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
				 srp_max_imm_data);

	if (max_it_iu_size)
		max_iu_len = min(max_iu_len, max_it_iu_size);

	pr_debug("max_iu_len = %d\n", max_iu_len);

	return max_iu_len;
}

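/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */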
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
						srp_use_imm_data,
						target->max_it_iu_size);
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

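	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */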
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	{
		struct srp_terminate_context context = {
			.srp_target = target, .scsi_result = DID_RESET << 16};

		scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
				    &context);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

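		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */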
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, max_iu_len, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}

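/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * where to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */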
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && target->global_rkey) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     target->global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, NULL);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}

static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	unsigned int sg_offset = 0;

	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
	state->sg = scat;

	if (count == 0)
		return 0;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	return 0;
}

static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *sg;
	int i;

	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
			     target->global_rkey);
	}

	return 0;
}

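/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */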
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_init_one(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}

static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{
	struct srp_device *dev = ch->target->srp_host->srp_dev;
	struct srp_fr_desc **pfr;
	u64 desc_len = 0, mr_len = 0;
	int i;

	for (i = 0; i < state->ndesc; i++)
		desc_len += be32_to_cpu(req->indirect_desc[i].len);
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
		       scsi_bufflen(req->scmnd), desc_len, mr_len,
		       state->ndesc, state->nmdesc);
}

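/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed. The size of any immediate data is not included in the
 * return value.
 */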
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct scatterlist *scat, *sg;
	struct srp_cmd *cmd = req->cmd->buf;
	int i, len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u64 data_len;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	req->cmd->num_sge = 1;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof(struct srp_cmd) + cmd->add_cdb_len;

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);
	data_len = scsi_bufflen(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	if (ch->use_imm_data &&
	    count <= ch->max_imm_sge &&
	    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
	    scmnd->sc_data_direction == DMA_TO_DEVICE) {
		struct srp_imm_buf *buf;
		struct ib_sge *sge = &req->cmd->sge[1];

		fmt = SRP_DATA_DESC_IMM;
		len = SRP_IMM_DATA_OFFSET;
		req->nmdesc = 0;
		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		buf->len = cpu_to_be32(data_len);
		WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
		for_each_sg(scat, sg, count, i) {
			sge[i].addr   = sg_dma_address(sg);
			sge[i].length = sg_dma_len(sg);
			sge[i].lkey   = target->lkey;
		}
		req->cmd->num_sge += count;
		goto map_complete;
	}

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
		sizeof(struct srp_direct_buf);

	if (count == 1 && target->global_rkey) {
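		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */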
		struct srp_direct_buf *buf;

		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->global_rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));

		req->nmdesc = 0;
		goto map_complete;
	}

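	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */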
	indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	req->nmdesc = state.nmdesc;
	if (ret < 0)
		goto unmap;

	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
			"Memory mapping consistency check");
		if (DYNAMIC_DEBUG_BRANCH(ddm))
			srp_check_mapping(&state, ch, req, scat, count);
	}

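	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */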
	if (state.ndesc == 1) {
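		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */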
		struct srp_direct_buf *buf;

		buf = (void *)cmd->add_data + cmd->add_cdb_len;
		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
		sizeof(struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!target->global_rkey) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(target->global_rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
		ret = -E2BIG;
	return ret;
}

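/*
 * Return an IU and possible credit to the free pool
 */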
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

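/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */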
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	lockdep_assert_held(&ch->lock);

	ib_process_cq_direct(ch->send_cq, -1);

	if (list_empty(&ch->free_tx))
		return NULL;

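	/* Initiator responses to target requests do not consume credits */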
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

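/*
 * Note: if this function is called from inside ib_drain_sq() then it will
 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
 * with status IB_WC_SUCCESS then that's a bug.
 */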
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	lockdep_assert_held(&ch->lock);

	list_add(&iu->list, &ch->free_tx);
}

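/**
 * srp_post_send() - send an SRP information unit
 * @ch: RDMA channel over which to send the information unit.
 * @iu: Information unit to send.
 * @len: Length of the information unit excluding immediate data.
 */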
static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_send_wr wr;

	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
		return -EINVAL;

	iu->sge[0].addr   = iu->dma;
	iu->sge[0].length = len;
	iu->sge[0].lkey   = target->lkey;

	iu->cqe.done = srp_send_done;

	wr.next       = NULL;
	wr.wr_cqe     = &iu->cqe;
	wr.sg_list    = &iu->sge[0];
	wr.num_sge    = iu->num_sge;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, NULL);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &iu->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, NULL);
}

static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		if (rsp->tag == ch->tsk_mgmt_tag) {
			ch->tsk_mgmt_status = -1;
			if (be32_to_cpu(rsp->resp_data_len) >= 4)
				ch->tsk_mgmt_status = rsp->data[3];
			complete(&ch->tsk_mgmt_done);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Received tsk mgmt response too late for tag %#llx\n",
				     rsp->tag);
		}
		spin_unlock_irqrestore(&ch->lock, flags);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd) {
			req = scsi_cmd_priv(scmnd);
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	iu->num_sge = 1;
	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	int res;
	u8 opcode;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

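	/* Debug-only branch (never taken): dump every received IU. */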
	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
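		/* XXX Handle target logout */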
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

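/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */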
2128static void srp_tl_err_work(struct work_struct *work)
2129{
2130 struct srp_target_port *target;
2131
2132 target = container_of(work, struct srp_target_port, tl_err_work);
2133 if (target->rport)
2134 srp_start_tl_fail_timers(target->rport);
2135}
2136
2137static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
2138 const char *opname)
2139{
2140 struct srp_rdma_ch *ch = cq->cq_context;
2141 struct srp_target_port *target = ch->target;
2142
2143 if (ch->connected && !target->qp_in_error) {
2144 shost_printk(KERN_ERR, target->scsi_host,
2145 PFX "failed %s status %s (%d) for CQE %p\n",
2146 opname, ib_wc_status_msg(wc->status), wc->status,
2147 wc->wr_cqe);
2148 queue_work(system_long_wq, &target->tl_err_work);
2149 }
2150 target->qp_in_error = true;
2151}
2152
2153static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
2154{
2155 struct srp_target_port *target = host_to_target(shost);
2156 struct srp_rdma_ch *ch;
2157 struct srp_request *req = scsi_cmd_priv(scmnd);
2158 struct srp_iu *iu;
2159 struct srp_cmd *cmd;
2160 struct ib_device *dev;
2161 unsigned long flags;
2162 u32 tag;
2163 int len, ret;
2164
2165 scmnd->result = srp_chkready(target->rport);
2166 if (unlikely(scmnd->result))
2167 goto err;
2168
2169 WARN_ON_ONCE(scmnd->request->tag < 0);
2170 tag = blk_mq_unique_tag(scmnd->request);
2171 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2172
2173 spin_lock_irqsave(&ch->lock, flags);
2174 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2175 spin_unlock_irqrestore(&ch->lock, flags);
2176
2177 if (!iu)
2178 goto err;
2179
2180 dev = target->srp_host->srp_dev->dev;
2181 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2182 DMA_TO_DEVICE);
2183
2184 cmd = iu->buf;
2185 memset(cmd, 0, sizeof *cmd);
2186
2187 cmd->opcode = SRP_CMD;
2188 int_to_scsilun(scmnd->device->lun, &cmd->lun);
2189 cmd->tag = tag;
2190 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2191 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
2192 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
2193 4);
2194 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
2195 goto err_iu;
2196 }
2197
2198 req->scmnd = scmnd;
2199 req->cmd = iu;
2200
2201 len = srp_map_data(scmnd, ch, req);
2202 if (len < 0) {
2203 shost_printk(KERN_ERR, target->scsi_host,
2204 PFX "Failed to map data (%d)\n", len);
2205		/*
2206		 * If we ran out of memory descriptors (-ENOMEM) because an
2207		 * application is queuing many requests with more than
2208		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2209		 * to reduce queue depth temporarily.
2210		 */
2211 scmnd->result = len == -ENOMEM ?
2212 DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
2213 goto err_iu;
2214 }
2215
2216 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2217 DMA_TO_DEVICE);
2218
2219 if (srp_post_send(ch, iu, len)) {
2220 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2221 scmnd->result = DID_ERROR << 16;
2222 goto err_unmap;
2223 }
2224
2225 return 0;
2226
2227err_unmap:
2228 srp_unmap_data(scmnd, ch, req);
2229
2230err_iu:
2231 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2232
2233	/*
2234	 * Avoid that the loops that iterate over the request ring can
2235	 * encounter a dangling SCSI command pointer.
2236	 */
2237 req->scmnd = NULL;
2238
2239err:
2240 if (scmnd->result) {
2241 scmnd->scsi_done(scmnd);
2242 ret = 0;
2243 } else {
2244 ret = SCSI_MLQUEUE_HOST_BUSY;
2245 }
2246
2247 return ret;
2248}
2249
2250/*
2251 * Note: the resources allocated in this function are freed in
2252 * srp_free_ch_ib().
2253 */
2254static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2255{
2256 struct srp_target_port *target = ch->target;
2257 int i;
2258
2259 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2260 GFP_KERNEL);
2261 if (!ch->rx_ring)
2262 goto err_no_ring;
2263 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2264 GFP_KERNEL);
2265 if (!ch->tx_ring)
2266 goto err_no_ring;
2267
2268 for (i = 0; i < target->queue_size; ++i) {
2269 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2270 ch->max_ti_iu_len,
2271 GFP_KERNEL, DMA_FROM_DEVICE);
2272 if (!ch->rx_ring[i])
2273 goto err;
2274 }
2275
2276 for (i = 0; i < target->queue_size; ++i) {
2277 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2278 ch->max_it_iu_len,
2279 GFP_KERNEL, DMA_TO_DEVICE);
2280 if (!ch->tx_ring[i])
2281 goto err;
2282
2283 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2284 }
2285
2286 return 0;
2287
2288err:
2289 for (i = 0; i < target->queue_size; ++i) {
2290 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2291 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2292 }
2293
2294
2295err_no_ring:
2296 kfree(ch->tx_ring);
2297 ch->tx_ring = NULL;
2298 kfree(ch->rx_ring);
2299 ch->rx_ring = NULL;
2300
2301 return -ENOMEM;
2302}
2303
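/*
 * Derive a SCSI request queue timeout (in jiffies) from the QP local ACK
 * timeout and retry count negotiated during connection establishment.
 */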
2304static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2305{
2306 uint64_t T_tr_ns, max_compl_time_ms;
2307 uint32_t rq_tmo_jiffies;
2308
2309	/*
2310	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2311	 * table 91), both the QP timeout and the retry count have to be set
2312	 * for RC QP's during the RTR to RTS transition.
2313	 */
2314 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2315 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2316
2317	/*
2318	 * Set target->rq_tmo_jiffies to one second more than the largest time
2319	 * it can take before an error completion is generated. See also
2320	 * C9-140..142 in the IBTA spec for more information about how to
2321	 * convert the QP Local ACK Timeout value to nanoseconds.
2322	 */
2323 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2324 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2325 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2326 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2327
2328 return rq_tmo_jiffies;
2329}
2330
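/*
 * Process a login response: record the negotiated IU sizes and request
 * limit, allocate and post the receive rings and, for the IB CM case,
 * transition the QP through RTR and RTS before sending an RTU. For the
 * RDMA CM path the QP state transitions are handled by the CM itself.
 */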
2331static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2332 const struct srp_login_rsp *lrsp,
2333 struct srp_rdma_ch *ch)
2334{
2335 struct srp_target_port *target = ch->target;
2336 struct ib_qp_attr *qp_attr = NULL;
2337 int attr_mask = 0;
2338 int ret = 0;
2339 int i;
2340
2341 if (lrsp->opcode == SRP_LOGIN_RSP) {
2342 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2343 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2344 ch->use_imm_data = srp_use_imm_data &&
2345 (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
2346 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
2347 ch->use_imm_data,
2348 target->max_it_iu_size);
2349 WARN_ON_ONCE(ch->max_it_iu_len >
2350 be32_to_cpu(lrsp->max_it_iu_len));
2351
2352 if (ch->use_imm_data)
2353 shost_printk(KERN_DEBUG, target->scsi_host,
2354 PFX "using immediate data\n");
2355
2356		/*
2357		 * Reserve credits for task management so we don't
2358		 * bounce requests back to the SCSI mid-layer.
2359		 */
2360 target->scsi_host->can_queue
2361 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2362 target->scsi_host->can_queue);
2363 target->scsi_host->cmd_per_lun
2364 = min_t(int, target->scsi_host->can_queue,
2365 target->scsi_host->cmd_per_lun);
2366 } else {
2367 shost_printk(KERN_WARNING, target->scsi_host,
2368 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2369 ret = -ECONNRESET;
2370 goto error;
2371 }
2372
2373 if (!ch->rx_ring) {
2374 ret = srp_alloc_iu_bufs(ch);
2375 if (ret)
2376 goto error;
2377 }
2378
2379 for (i = 0; i < target->queue_size; i++) {
2380 struct srp_iu *iu = ch->rx_ring[i];
2381
2382 ret = srp_post_recv(ch, iu);
2383 if (ret)
2384 goto error;
2385 }
2386
2387 if (!target->using_rdma_cm) {
2388 ret = -ENOMEM;
2389 qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
2390 if (!qp_attr)
2391 goto error;
2392
2393 qp_attr->qp_state = IB_QPS_RTR;
2394 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2395 if (ret)
2396 goto error_free;
2397
2398 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2399 if (ret)
2400 goto error_free;
2401
2402 qp_attr->qp_state = IB_QPS_RTS;
2403 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2404 if (ret)
2405 goto error_free;
2406
2407 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2408
2409 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2410 if (ret)
2411 goto error_free;
2412
2413 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2414 }
2415
2416error_free:
2417 kfree(qp_attr);
2418
2419error:
2420 ch->status = ret;
2421}
2422
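/*
 * Decode an IB CM REJ: handle CM and port redirects, translate login
 * rejections into a connection reset and flag stale connections so that
 * the connect attempt can be retried.
 */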
2423static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
2424 const struct ib_cm_event *event,
2425 struct srp_rdma_ch *ch)
2426{
2427 struct srp_target_port *target = ch->target;
2428 struct Scsi_Host *shost = target->scsi_host;
2429 struct ib_class_port_info *cpi;
2430 int opcode;
2431 u16 dlid;
2432
2433 switch (event->param.rej_rcvd.reason) {
2434 case IB_CM_REJ_PORT_CM_REDIRECT:
2435 cpi = event->param.rej_rcvd.ari;
2436 dlid = be16_to_cpu(cpi->redirect_lid);
2437 sa_path_set_dlid(&ch->ib_cm.path, dlid);
2438 ch->ib_cm.path.pkey = cpi->redirect_pkey;
2439 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2440 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
2441
2442 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2443 break;
2444
2445 case IB_CM_REJ_PORT_REDIRECT:
2446 if (srp_target_is_topspin(target)) {
2447 union ib_gid *dgid = &ch->ib_cm.path.dgid;
2448
2449			/*
2450			 * Topspin/Cisco SRP gateways put the target port GID
2451			 * into the REJ ARI field; copy it into the path so
2452			 * that the connect request can be redirected.
2453			 */
2454 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
2455
2456 shost_printk(KERN_DEBUG, shost,
2457 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2458 be64_to_cpu(dgid->global.subnet_prefix),
2459 be64_to_cpu(dgid->global.interface_id));
2460
2461 ch->status = SRP_PORT_REDIRECT;
2462 } else {
2463 shost_printk(KERN_WARNING, shost,
2464 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2465 ch->status = -ECONNRESET;
2466 }
2467 break;
2468
2469 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2470 shost_printk(KERN_WARNING, shost,
2471 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2472 ch->status = -ECONNRESET;
2473 break;
2474
2475 case IB_CM_REJ_CONSUMER_DEFINED:
2476 opcode = *(u8 *) event->private_data;
2477 if (opcode == SRP_LOGIN_REJ) {
2478 struct srp_login_rej *rej = event->private_data;
2479 u32 reason = be32_to_cpu(rej->reason);
2480
2481 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2482 shost_printk(KERN_WARNING, shost,
2483 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2484 else
2485 shost_printk(KERN_WARNING, shost, PFX
2486 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2487 target->sgid.raw,
2488 target->ib_cm.orig_dgid.raw,
2489 reason);
2490 } else
2491 shost_printk(KERN_WARNING, shost,
2492 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2493 " opcode 0x%02x\n", opcode);
2494 ch->status = -ECONNRESET;
2495 break;
2496
2497 case IB_CM_REJ_STALE_CONN:
2498 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2499 ch->status = SRP_STALE_CONN;
2500 break;
2501
2502 default:
2503 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2504 event->param.rej_rcvd.reason);
2505 ch->status = -ECONNRESET;
2506 }
2507}
2508
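/* IB CM callback; dispatches connection-management events for one RDMA channel. */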
2509static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
2510 const struct ib_cm_event *event)
2511{
2512 struct srp_rdma_ch *ch = cm_id->context;
2513 struct srp_target_port *target = ch->target;
2514 int comp = 0;
2515
2516 switch (event->event) {
2517 case IB_CM_REQ_ERROR:
2518 shost_printk(KERN_DEBUG, target->scsi_host,
2519 PFX "Sending CM REQ failed\n");
2520 comp = 1;
2521 ch->status = -ECONNRESET;
2522 break;
2523
2524 case IB_CM_REP_RECEIVED:
2525 comp = 1;
2526 srp_cm_rep_handler(cm_id, event->private_data, ch);
2527 break;
2528
2529 case IB_CM_REJ_RECEIVED:
2530 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2531 comp = 1;
2532
2533 srp_ib_cm_rej_handler(cm_id, event, ch);
2534 break;
2535
2536 case IB_CM_DREQ_RECEIVED:
2537 shost_printk(KERN_WARNING, target->scsi_host,
2538 PFX "DREQ received - connection closed\n");
2539 ch->connected = false;
2540 if (ib_send_cm_drep(cm_id, NULL, 0))
2541 shost_printk(KERN_ERR, target->scsi_host,
2542 PFX "Sending CM DREP failed\n");
2543 queue_work(system_long_wq, &target->tl_err_work);
2544 break;
2545
2546 case IB_CM_TIMEWAIT_EXIT:
2547 shost_printk(KERN_ERR, target->scsi_host,
2548 PFX "connection closed\n");
2549 comp = 1;
2550
2551 ch->status = 0;
2552 break;
2553
2554 case IB_CM_MRA_RECEIVED:
2555 case IB_CM_DREQ_ERROR:
2556 case IB_CM_DREP_RECEIVED:
2557 break;
2558
2559 default:
2560 shost_printk(KERN_WARNING, target->scsi_host,
2561 PFX "Unhandled CM event %d\n", event->event);
2562 break;
2563 }
2564
2565 if (comp)
2566 complete(&ch->done);
2567
2568 return 0;
2569}
2570
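/*
 * Translate a connection reject reported through the RDMA CM (which
 * carries the IB CM reject reason in event->status) into a channel status.
 */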
2571static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
2572 struct rdma_cm_event *event)
2573{
2574 struct srp_target_port *target = ch->target;
2575 struct Scsi_Host *shost = target->scsi_host;
2576 int opcode;
2577
2578 switch (event->status) {
2579 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2580 shost_printk(KERN_WARNING, shost,
2581 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2582 ch->status = -ECONNRESET;
2583 break;
2584
2585 case IB_CM_REJ_CONSUMER_DEFINED:
2586 opcode = *(u8 *) event->param.conn.private_data;
2587 if (opcode == SRP_LOGIN_REJ) {
2588 struct srp_login_rej *rej =
2589 (struct srp_login_rej *)
2590 event->param.conn.private_data;
2591 u32 reason = be32_to_cpu(rej->reason);
2592
2593 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2594 shost_printk(KERN_WARNING, shost,
2595 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2596 else
2597 shost_printk(KERN_WARNING, shost,
2598 PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
2599 } else {
2600 shost_printk(KERN_WARNING, shost,
2601 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
2602 opcode);
2603 }
2604 ch->status = -ECONNRESET;
2605 break;
2606
2607 case IB_CM_REJ_STALE_CONN:
2608 shost_printk(KERN_WARNING, shost,
2609 " REJ reason: stale connection\n");
2610 ch->status = SRP_STALE_CONN;
2611 break;
2612
2613 default:
2614 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2615 event->status);
2616 ch->status = -ECONNRESET;
2617 break;
2618 }
2619}
2620
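/* RDMA CM event handler; the RDMA CM counterpart of srp_ib_cm_handler(). */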
2621static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
2622 struct rdma_cm_event *event)
2623{
2624 struct srp_rdma_ch *ch = cm_id->context;
2625 struct srp_target_port *target = ch->target;
2626 int comp = 0;
2627
2628 switch (event->event) {
2629 case RDMA_CM_EVENT_ADDR_RESOLVED:
2630 ch->status = 0;
2631 comp = 1;
2632 break;
2633
2634 case RDMA_CM_EVENT_ADDR_ERROR:
2635 ch->status = -ENXIO;
2636 comp = 1;
2637 break;
2638
2639 case RDMA_CM_EVENT_ROUTE_RESOLVED:
2640 ch->status = 0;
2641 comp = 1;
2642 break;
2643
2644 case RDMA_CM_EVENT_ROUTE_ERROR:
2645 case RDMA_CM_EVENT_UNREACHABLE:
2646 ch->status = -EHOSTUNREACH;
2647 comp = 1;
2648 break;
2649
2650 case RDMA_CM_EVENT_CONNECT_ERROR:
2651 shost_printk(KERN_DEBUG, target->scsi_host,
2652 PFX "Sending CM REQ failed\n");
2653 comp = 1;
2654 ch->status = -ECONNRESET;
2655 break;
2656
2657 case RDMA_CM_EVENT_ESTABLISHED:
2658 comp = 1;
2659 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
2660 break;
2661
2662 case RDMA_CM_EVENT_REJECTED:
2663 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2664 comp = 1;
2665
2666 srp_rdma_cm_rej_handler(ch, event);
2667 break;
2668
2669 case RDMA_CM_EVENT_DISCONNECTED:
2670 if (ch->connected) {
2671 shost_printk(KERN_WARNING, target->scsi_host,
2672 PFX "received DREQ\n");
2673 rdma_disconnect(ch->rdma_cm.cm_id);
2674 comp = 1;
2675 ch->status = 0;
2676 queue_work(system_long_wq, &target->tl_err_work);
2677 }
2678 break;
2679
2680 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2681 shost_printk(KERN_ERR, target->scsi_host,
2682 PFX "connection closed\n");
2683
2684 comp = 1;
2685 ch->status = 0;
2686 break;
2687
2688 default:
2689 shost_printk(KERN_WARNING, target->scsi_host,
2690 PFX "Unhandled CM event %d\n", event->event);
2691 break;
2692 }
2693
2694 if (comp)
2695 complete(&ch->done);
2696
2697 return 0;
2698}
2699
2700/**
2701 * srp_change_queue_depth - setting device queue depth
2702 * @sdev: scsi device struct
2703 * @qdepth: requested queue depth
2704 *
2705 * Returns queue depth.
2706 */
2707static int
2708srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2709{
2710 if (!sdev->tagged_supported)
2711 qdepth = 1;
2712 return scsi_change_queue_depth(sdev, qdepth);
2713}
2714
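/*
 * Send an SRP_TSK_MGMT information unit and wait up to
 * SRP_ABORT_TIMEOUT_MS for the response. Returns 0 on success and -1 on
 * failure or timeout; *status is only set when a response was received.
 */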
2715static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2716 u8 func, u8 *status)
2717{
2718 struct srp_target_port *target = ch->target;
2719 struct srp_rport *rport = target->rport;
2720 struct ib_device *dev = target->srp_host->srp_dev->dev;
2721 struct srp_iu *iu;
2722 struct srp_tsk_mgmt *tsk_mgmt;
2723 int res;
2724
2725 if (!ch->connected || target->qp_in_error)
2726 return -1;
2727
2728	/*
2729	 * Lock the rport mutex to avoid that srp_create_ch_ib() is
2730	 * invoked while a task management function is being sent.
2731	 */
2732 mutex_lock(&rport->mutex);
2733 spin_lock_irq(&ch->lock);
2734 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2735 spin_unlock_irq(&ch->lock);
2736
2737 if (!iu) {
2738 mutex_unlock(&rport->mutex);
2739
2740 return -1;
2741 }
2742
2743 iu->num_sge = 1;
2744
2745 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2746 DMA_TO_DEVICE);
2747 tsk_mgmt = iu->buf;
2748 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2749
2750 tsk_mgmt->opcode = SRP_TSK_MGMT;
2751 int_to_scsilun(lun, &tsk_mgmt->lun);
2752 tsk_mgmt->tsk_mgmt_func = func;
2753 tsk_mgmt->task_tag = req_tag;
2754
2755 spin_lock_irq(&ch->lock);
2756 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2757 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2758 spin_unlock_irq(&ch->lock);
2759
2760 init_completion(&ch->tsk_mgmt_done);
2761
2762 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2763 DMA_TO_DEVICE);
2764 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2765 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2766 mutex_unlock(&rport->mutex);
2767
2768 return -1;
2769 }
2770 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2771 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2772 if (res > 0 && status)
2773 *status = ch->tsk_mgmt_status;
2774 mutex_unlock(&rport->mutex);
2775
2776 WARN_ON_ONCE(res < 0);
2777
2778 return res > 0 ? 0 : -1;
2779}
2780
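/*
 * SCSI EH abort handler: claim the request and send SRP_TSK_ABORT_TASK
 * over the channel that the command was queued on.
 */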
2781static int srp_abort(struct scsi_cmnd *scmnd)
2782{
2783 struct srp_target_port *target = host_to_target(scmnd->device->host);
2784	struct srp_request *req = scsi_cmd_priv(scmnd);
2785 u32 tag;
2786 u16 ch_idx;
2787 struct srp_rdma_ch *ch;
2788 int ret;
2789
2790 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2791
2792 if (!req)
2793 return SUCCESS;
2794 tag = blk_mq_unique_tag(scmnd->request);
2795 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2796 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2797 return SUCCESS;
2798 ch = &target->ch[ch_idx];
2799 if (!srp_claim_req(ch, req, NULL, scmnd))
2800 return SUCCESS;
2801 shost_printk(KERN_ERR, target->scsi_host,
2802 "Sending SRP abort for tag %#x\n", tag);
2803 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2804 SRP_TSK_ABORT_TASK, NULL) == 0)
2805 ret = SUCCESS;
2806 else if (target->rport->state == SRP_RPORT_LOST)
2807 ret = FAST_IO_FAIL;
2808 else
2809 ret = FAILED;
2810 if (ret == SUCCESS) {
2811 srp_free_req(ch, req, scmnd, 0);
2812 scmnd->result = DID_ABORT << 16;
2813 scmnd->scsi_done(scmnd);
2814 }
2815
2816 return ret;
2817}
2818
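/* SCSI EH device reset handler: send an SRP LUN RESET over channel 0. */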
2819static int srp_reset_device(struct scsi_cmnd *scmnd)
2820{
2821 struct srp_target_port *target = host_to_target(scmnd->device->host);
2822 struct srp_rdma_ch *ch;
2823 u8 status;
2824
2825 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2826
2827 ch = &target->ch[0];
2828 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2829 SRP_TSK_LUN_RESET, &status))
2830 return FAILED;
2831 if (status)
2832 return FAILED;
2833
2834 return SUCCESS;
2835}
2836
2837static int srp_reset_host(struct scsi_cmnd *scmnd)
2838{
2839 struct srp_target_port *target = host_to_target(scmnd->device->host);
2840
2841 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2842
2843 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2844}
2845
2846static int srp_target_alloc(struct scsi_target *starget)
2847{
2848 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2849 struct srp_target_port *target = host_to_target(shost);
2850
2851 if (target->target_can_queue)
2852 starget->can_queue = target->target_can_queue;
2853 return 0;
2854}
2855
2856static int srp_slave_configure(struct scsi_device *sdev)
2857{
2858 struct Scsi_Host *shost = sdev->host;
2859 struct srp_target_port *target = host_to_target(shost);
2860 struct request_queue *q = sdev->request_queue;
2861 unsigned long timeout;
2862
2863 if (sdev->type == TYPE_DISK) {
2864 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2865 blk_queue_rq_timeout(q, timeout);
2866 }
2867
2868 return 0;
2869}
2870
2871static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
2872 char *buf)
2873{
2874 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2875
2876 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2877}
2878
2879static DEVICE_ATTR_RO(id_ext);
2880
2881static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
2882 char *buf)
2883{
2884 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2885
2886 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2887}
2888
2889static DEVICE_ATTR_RO(ioc_guid);
2890
2891static ssize_t service_id_show(struct device *dev,
2892 struct device_attribute *attr, char *buf)
2893{
2894 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2895
2896 if (target->using_rdma_cm)
2897 return -ENOENT;
2898 return sysfs_emit(buf, "0x%016llx\n",
2899 be64_to_cpu(target->ib_cm.service_id));
2900}
2901
2902static DEVICE_ATTR_RO(service_id);
2903
2904static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
2905 char *buf)
2906{
2907 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2908
2909 if (target->using_rdma_cm)
2910 return -ENOENT;
2911
2912 return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
2913}
2914
2915static DEVICE_ATTR_RO(pkey);
2916
2917static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
2918 char *buf)
2919{
2920 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2921
2922 return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
2923}
2924
2925static DEVICE_ATTR_RO(sgid);
2926
2927static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
2928 char *buf)
2929{
2930 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2931 struct srp_rdma_ch *ch = &target->ch[0];
2932
2933 if (target->using_rdma_cm)
2934 return -ENOENT;
2935
2936 return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
2937}
2938
2939static DEVICE_ATTR_RO(dgid);
2940
2941static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
2942 char *buf)
2943{
2944 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2945
2946 if (target->using_rdma_cm)
2947 return -ENOENT;
2948
2949 return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
2950}
2951
2952static DEVICE_ATTR_RO(orig_dgid);
2953
2954static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
2955 char *buf)
2956{
2957 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2958 struct srp_rdma_ch *ch;
2959 int i, req_lim = INT_MAX;
2960
2961 for (i = 0; i < target->ch_count; i++) {
2962 ch = &target->ch[i];
2963 req_lim = min(req_lim, ch->req_lim);
2964 }
2965
2966 return sysfs_emit(buf, "%d\n", req_lim);
2967}
2968
2969static DEVICE_ATTR_RO(req_lim);
2970
2971static ssize_t zero_req_lim_show(struct device *dev,
2972 struct device_attribute *attr, char *buf)
2973{
2974 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2975
2976 return sysfs_emit(buf, "%d\n", target->zero_req_lim);
2977}
2978
2979static DEVICE_ATTR_RO(zero_req_lim);
2980
2981static ssize_t local_ib_port_show(struct device *dev,
2982 struct device_attribute *attr, char *buf)
2983{
2984 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2985
2986 return sysfs_emit(buf, "%d\n", target->srp_host->port);
2987}
2988
2989static DEVICE_ATTR_RO(local_ib_port);
2990
2991static ssize_t local_ib_device_show(struct device *dev,
2992 struct device_attribute *attr, char *buf)
2993{
2994 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2995
2996 return sysfs_emit(buf, "%s\n",
2997 dev_name(&target->srp_host->srp_dev->dev->dev));
2998}
2999
3000static DEVICE_ATTR_RO(local_ib_device);
3001
3002static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
3003 char *buf)
3004{
3005 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3006
3007 return sysfs_emit(buf, "%d\n", target->ch_count);
3008}
3009
3010static DEVICE_ATTR_RO(ch_count);
3011
3012static ssize_t comp_vector_show(struct device *dev,
3013 struct device_attribute *attr, char *buf)
3014{
3015 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3016
3017 return sysfs_emit(buf, "%d\n", target->comp_vector);
3018}
3019
3020static DEVICE_ATTR_RO(comp_vector);
3021
3022static ssize_t tl_retry_count_show(struct device *dev,
3023 struct device_attribute *attr, char *buf)
3024{
3025 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3026
3027 return sysfs_emit(buf, "%d\n", target->tl_retry_count);
3028}
3029
3030static DEVICE_ATTR_RO(tl_retry_count);
3031
3032static ssize_t cmd_sg_entries_show(struct device *dev,
3033 struct device_attribute *attr, char *buf)
3034{
3035 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3036
3037 return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
3038}
3039
3040static DEVICE_ATTR_RO(cmd_sg_entries);
3041
3042static ssize_t allow_ext_sg_show(struct device *dev,
3043 struct device_attribute *attr, char *buf)
3044{
3045 struct srp_target_port *target = host_to_target(class_to_shost(dev));
3046
3047 return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
3048}
3049
3050static DEVICE_ATTR_RO(allow_ext_sg);
3051
3052static struct device_attribute *srp_host_attrs[] = {
3053 &dev_attr_id_ext,
3054 &dev_attr_ioc_guid,
3055 &dev_attr_service_id,
3056 &dev_attr_pkey,
3057 &dev_attr_sgid,
3058 &dev_attr_dgid,
3059 &dev_attr_orig_dgid,
3060 &dev_attr_req_lim,
3061 &dev_attr_zero_req_lim,
3062 &dev_attr_local_ib_port,
3063 &dev_attr_local_ib_device,
3064 &dev_attr_ch_count,
3065 &dev_attr_comp_vector,
3066 &dev_attr_tl_retry_count,
3067 &dev_attr_cmd_sg_entries,
3068 &dev_attr_allow_ext_sg,
3069 NULL
3070};
3071
3072static struct scsi_host_template srp_template = {
3073 .module = THIS_MODULE,
3074 .name = "InfiniBand SRP initiator",
3075 .proc_name = DRV_NAME,
3076 .target_alloc = srp_target_alloc,
3077 .slave_configure = srp_slave_configure,
3078 .info = srp_target_info,
3079 .init_cmd_priv = srp_init_cmd_priv,
3080 .exit_cmd_priv = srp_exit_cmd_priv,
3081 .queuecommand = srp_queuecommand,
3082 .change_queue_depth = srp_change_queue_depth,
3083 .eh_timed_out = srp_timed_out,
3084 .eh_abort_handler = srp_abort,
3085 .eh_device_reset_handler = srp_reset_device,
3086 .eh_host_reset_handler = srp_reset_host,
3087 .skip_settle_delay = true,
3088 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
3089 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
3090 .this_id = -1,
3091 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
3092 .shost_attrs = srp_host_attrs,
3093 .track_queue_depth = 1,
3094 .cmd_size = sizeof(struct srp_request),
3095};
3096
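/* Return the number of SCSI devices attached to a SCSI host. */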
3097static int srp_sdev_count(struct Scsi_Host *host)
3098{
3099 struct scsi_device *sdev;
3100 int c = 0;
3101
3102 shost_for_each_device(sdev, host)
3103 c++;
3104
3105 return c;
3106}
3107
3108/*
3109 * Return values:
3110 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
3111 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3112 *    removal has been scheduled.
3113 * 0 and target->state != SRP_TARGET_REMOVED upon success.
3114 */
3115static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
3116{
3117 struct srp_rport_identifiers ids;
3118 struct srp_rport *rport;
3119
3120 target->state = SRP_TARGET_SCANNING;
3121 sprintf(target->target_name, "SRP.T10:%016llX",
3122 be64_to_cpu(target->id_ext));
3123
3124 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3125 return -ENODEV;
3126
3127 memcpy(ids.port_id, &target->id_ext, 8);
3128 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
3129 ids.roles = SRP_RPORT_ROLE_TARGET;
3130 rport = srp_rport_add(target->scsi_host, &ids);
3131 if (IS_ERR(rport)) {
3132 scsi_remove_host(target->scsi_host);
3133 return PTR_ERR(rport);
3134 }
3135
3136 rport->lld_data = target;
3137 target->rport = rport;
3138
3139 spin_lock(&host->target_lock);
3140 list_add_tail(&target->list, &host->target_list);
3141 spin_unlock(&host->target_lock);
3142
3143 scsi_scan_target(&target->scsi_host->shost_gendev,
3144 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
3145
3146 if (srp_connected_ch(target) < target->ch_count ||
3147 target->qp_in_error) {
3148 shost_printk(KERN_INFO, target->scsi_host,
3149 PFX "SCSI scan failed - removing SCSI host\n");
3150 srp_queue_remove_work(target);
3151 goto out;
3152 }
3153
3154 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
3155 dev_name(&target->scsi_host->shost_gendev),
3156 srp_sdev_count(target->scsi_host));
3157
3158 spin_lock_irq(&target->lock);
3159 if (target->state == SRP_TARGET_SCANNING)
3160 target->state = SRP_TARGET_LIVE;
3161 spin_unlock_irq(&target->lock);
3162
3163out:
3164 return 0;
3165}
3166
3167static void srp_release_dev(struct device *dev)
3168{
3169 struct srp_host *host =
3170 container_of(dev, struct srp_host, dev);
3171
3172 complete(&host->released);
3173}
3174
3175static struct class srp_class = {
3176 .name = "infiniband_srp",
3177 .dev_release = srp_release_dev
3178};
3179
3180/**
3181 * srp_conn_unique() - check whether the connection to a target is unique
3182 * @host:   SRP host.
3183 * @target: SRP target port.
3184 */
3185static bool srp_conn_unique(struct srp_host *host,
3186 struct srp_target_port *target)
3187{
3188 struct srp_target_port *t;
3189 bool ret = false;
3190
3191 if (target->state == SRP_TARGET_REMOVED)
3192 goto out;
3193
3194 ret = true;
3195
3196 spin_lock(&host->target_lock);
3197 list_for_each_entry(t, &host->target_list, list) {
3198 if (t != target &&
3199 target->id_ext == t->id_ext &&
3200 target->ioc_guid == t->ioc_guid &&
3201 target->initiator_ext == t->initiator_ext) {
3202 ret = false;
3203 break;
3204 }
3205 }
3206 spin_unlock(&host->target_lock);
3207
3208out:
3209 return ret;
3210}
3211
3212/*
3213 * Target ports are added by writing
3214 *
3215 * id_ext=<SRP identifier extension>,ioc_guid=<EUI-64>,dgid=<dest GID>,
3216 *     pkey=<P_Key>,service_id=<service ID>
3217 * or
3218 * id_ext=<SRP identifier extension>,ioc_guid=<EUI-64>,
3219 *     [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
3220 *
3221 * to the add_target sysfs attribute.
3222 */
3223enum {
3224 SRP_OPT_ERR = 0,
3225 SRP_OPT_ID_EXT = 1 << 0,
3226 SRP_OPT_IOC_GUID = 1 << 1,
3227 SRP_OPT_DGID = 1 << 2,
3228 SRP_OPT_PKEY = 1 << 3,
3229 SRP_OPT_SERVICE_ID = 1 << 4,
3230 SRP_OPT_MAX_SECT = 1 << 5,
3231 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
3232 SRP_OPT_IO_CLASS = 1 << 7,
3233 SRP_OPT_INITIATOR_EXT = 1 << 8,
3234 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
3235 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3236 SRP_OPT_SG_TABLESIZE = 1 << 11,
3237 SRP_OPT_COMP_VECTOR = 1 << 12,
3238 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
3239 SRP_OPT_QUEUE_SIZE = 1 << 14,
3240 SRP_OPT_IP_SRC = 1 << 15,
3241 SRP_OPT_IP_DEST = 1 << 16,
3242 SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
3243 SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
3244 SRP_OPT_CH_COUNT = 1 << 19,
3245};
3246
3247static unsigned int srp_opt_mandatory[] = {
3248 SRP_OPT_ID_EXT |
3249 SRP_OPT_IOC_GUID |
3250 SRP_OPT_DGID |
3251 SRP_OPT_PKEY |
3252 SRP_OPT_SERVICE_ID,
3253 SRP_OPT_ID_EXT |
3254 SRP_OPT_IOC_GUID |
3255 SRP_OPT_IP_DEST,
3256};
3257
3258static const match_table_t srp_opt_tokens = {
3259 { SRP_OPT_ID_EXT, "id_ext=%s" },
3260 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3261 { SRP_OPT_DGID, "dgid=%s" },
3262 { SRP_OPT_PKEY, "pkey=%x" },
3263 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3264 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3265 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
3266 { SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
3267 { SRP_OPT_IO_CLASS, "io_class=%x" },
3268 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
3269 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
3270 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3271 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
3272 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
3273 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
3274 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
3275 { SRP_OPT_IP_SRC, "src=%s" },
3276 { SRP_OPT_IP_DEST, "dest=%s" },
3277 { SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
3278 { SRP_OPT_CH_COUNT, "ch_count=%u", },
3279 { SRP_OPT_ERR, NULL }
3280};
3281
3282/**
3283 * srp_parse_in - parse an IP address and port number combination
3284 * @net:	   [in]  Network namespace.
3285 * @sa:		   [out] Address family, IP address and port number.
3286 * @addr_port_str: [in]  IP address and port number.
3287 * @has_port:	   [out] Whether or not @addr_port_str includes a port number.
3288 *
3289 * Parse the following address formats:
3290 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3291 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3292 */
3293static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3294 const char *addr_port_str, bool *has_port)
3295{
3296 char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3297 char *port_str;
3298 int ret;
3299
3300 if (!addr)
3301 return -ENOMEM;
3302 port_str = strrchr(addr, ':');
3303 if (port_str && strchr(port_str, ']'))
3304 port_str = NULL;
3305 if (port_str)
3306 *port_str++ = '\0';
3307 if (has_port)
3308 *has_port = port_str != NULL;
3309	ret = inet_pton_with_scope(net, AF_UNSPEC, addr, port_str, sa);
3310 if (ret && addr[0]) {
3311 addr_end = addr + strlen(addr) - 1;
3312 if (addr[0] == '[' && *addr_end == ']') {
3313 *addr_end = '\0';
3314 ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
3315 port_str, sa);
3316 }
3317 }
3318 kfree(addr);
3319 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
3320 return ret;
3321}
3322
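/*
 * Parse the comma-separated key=value option string written to the
 * add_target attribute into *target. Returns 0 only if all mandatory
 * options for either the IB CM or the RDMA CM login path were present.
 */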
3323static int srp_parse_options(struct net *net, const char *buf,
3324 struct srp_target_port *target)
3325{
3326 char *options, *sep_opt;
3327 char *p;
3328 substring_t args[MAX_OPT_ARGS];
3329 unsigned long long ull;
3330 bool has_port;
3331 int opt_mask = 0;
3332 int token;
3333 int ret = -EINVAL;
3334 int i;
3335
3336 options = kstrdup(buf, GFP_KERNEL);
3337 if (!options)
3338 return -ENOMEM;
3339
3340 sep_opt = options;
3341 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3342 if (!*p)
3343 continue;
3344
3345 token = match_token(p, srp_opt_tokens, args);
3346 opt_mask |= token;
3347
3348 switch (token) {
3349 case SRP_OPT_ID_EXT:
3350 p = match_strdup(args);
3351 if (!p) {
3352 ret = -ENOMEM;
3353 goto out;
3354 }
3355 ret = kstrtoull(p, 16, &ull);
3356 if (ret) {
3357 pr_warn("invalid id_ext parameter '%s'\n", p);
3358 kfree(p);
3359 goto out;
3360 }
3361 target->id_ext = cpu_to_be64(ull);
3362 kfree(p);
3363 break;
3364
3365 case SRP_OPT_IOC_GUID:
3366 p = match_strdup(args);
3367 if (!p) {
3368 ret = -ENOMEM;
3369 goto out;
3370 }
3371 ret = kstrtoull(p, 16, &ull);
3372 if (ret) {
3373 pr_warn("invalid ioc_guid parameter '%s'\n", p);
3374 kfree(p);
3375 goto out;
3376 }
3377 target->ioc_guid = cpu_to_be64(ull);
3378 kfree(p);
3379 break;
3380
3381 case SRP_OPT_DGID:
3382 p = match_strdup(args);
3383 if (!p) {
3384 ret = -ENOMEM;
3385 goto out;
3386 }
3387 if (strlen(p) != 32) {
3388 pr_warn("bad dest GID parameter '%s'\n", p);
3389 kfree(p);
3390 goto out;
3391 }
3392
3393 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
3394 kfree(p);
3395 if (ret < 0)
3396 goto out;
3397 break;
3398
3399 case SRP_OPT_PKEY:
3400 if (match_hex(args, &token)) {
3401 pr_warn("bad P_Key parameter '%s'\n", p);
3402 goto out;
3403 }
3404 target->ib_cm.pkey = cpu_to_be16(token);
3405 break;
3406
3407 case SRP_OPT_SERVICE_ID:
3408 p = match_strdup(args);
3409 if (!p) {
3410 ret = -ENOMEM;
3411 goto out;
3412 }
3413 ret = kstrtoull(p, 16, &ull);
3414 if (ret) {
3415 pr_warn("bad service_id parameter '%s'\n", p);
3416 kfree(p);
3417 goto out;
3418 }
3419 target->ib_cm.service_id = cpu_to_be64(ull);
3420 kfree(p);
3421 break;
3422
3423 case SRP_OPT_IP_SRC:
3424 p = match_strdup(args);
3425 if (!p) {
3426 ret = -ENOMEM;
3427 goto out;
3428 }
3429 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3430 NULL);
3431 if (ret < 0) {
3432 pr_warn("bad source parameter '%s'\n", p);
3433 kfree(p);
3434 goto out;
3435 }
3436 target->rdma_cm.src_specified = true;
3437 kfree(p);
3438 break;
3439
3440 case SRP_OPT_IP_DEST:
3441 p = match_strdup(args);
3442 if (!p) {
3443 ret = -ENOMEM;
3444 goto out;
3445 }
3446 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3447 &has_port);
3448 if (!has_port)
3449 ret = -EINVAL;
3450 if (ret < 0) {
3451 pr_warn("bad dest parameter '%s'\n", p);
3452 kfree(p);
3453 goto out;
3454 }
3455 target->using_rdma_cm = true;
3456 kfree(p);
3457 break;
3458
3459 case SRP_OPT_MAX_SECT:
3460 if (match_int(args, &token)) {
3461 pr_warn("bad max sect parameter '%s'\n", p);
3462 goto out;
3463 }
3464 target->scsi_host->max_sectors = token;
3465 break;
3466
3467 case SRP_OPT_QUEUE_SIZE:
3468 if (match_int(args, &token) || token < 1) {
3469 pr_warn("bad queue_size parameter '%s'\n", p);
3470 goto out;
3471 }
3472 target->scsi_host->can_queue = token;
3473 target->queue_size = token + SRP_RSP_SQ_SIZE +
3474 SRP_TSK_MGMT_SQ_SIZE;
3475 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3476 target->scsi_host->cmd_per_lun = token;
3477 break;
3478
3479 case SRP_OPT_MAX_CMD_PER_LUN:
3480 if (match_int(args, &token) || token < 1) {
3481 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3482 p);
3483 goto out;
3484 }
3485 target->scsi_host->cmd_per_lun = token;
3486 break;
3487
3488 case SRP_OPT_TARGET_CAN_QUEUE:
3489 if (match_int(args, &token) || token < 1) {
3490 pr_warn("bad max target_can_queue parameter '%s'\n",
3491 p);
3492 goto out;
3493 }
3494 target->target_can_queue = token;
3495 break;
3496
3497 case SRP_OPT_IO_CLASS:
3498 if (match_hex(args, &token)) {
3499 pr_warn("bad IO class parameter '%s'\n", p);
3500 goto out;
3501 }
3502 if (token != SRP_REV10_IB_IO_CLASS &&
3503 token != SRP_REV16A_IB_IO_CLASS) {
3504 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3505 token, SRP_REV10_IB_IO_CLASS,
3506 SRP_REV16A_IB_IO_CLASS);
3507 goto out;
3508 }
3509 target->io_class = token;
3510 break;
3511
3512 case SRP_OPT_INITIATOR_EXT:
3513 p = match_strdup(args);
3514 if (!p) {
3515 ret = -ENOMEM;
3516 goto out;
3517 }
3518 ret = kstrtoull(p, 16, &ull);
3519 if (ret) {
3520 pr_warn("bad initiator_ext value '%s'\n", p);
3521 kfree(p);
3522 goto out;
3523 }
3524 target->initiator_ext = cpu_to_be64(ull);
3525 kfree(p);
3526 break;
3527
3528 case SRP_OPT_CMD_SG_ENTRIES:
3529 if (match_int(args, &token) || token < 1 || token > 255) {
3530 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3531 p);
3532 goto out;
3533 }
3534 target->cmd_sg_cnt = token;
3535 break;
3536
3537 case SRP_OPT_ALLOW_EXT_SG:
3538 if (match_int(args, &token)) {
3539 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3540 goto out;
3541 }
3542 target->allow_ext_sg = !!token;
3543 break;
3544
3545 case SRP_OPT_SG_TABLESIZE:
3546 if (match_int(args, &token) || token < 1 ||
3547 token > SG_MAX_SEGMENTS) {
3548 pr_warn("bad max sg_tablesize parameter '%s'\n",
3549 p);
3550 goto out;
3551 }
3552 target->sg_tablesize = token;
3553 break;
3554
3555 case SRP_OPT_COMP_VECTOR:
3556 if (match_int(args, &token) || token < 0) {
3557 pr_warn("bad comp_vector parameter '%s'\n", p);
3558 goto out;
3559 }
3560 target->comp_vector = token;
3561 break;
3562
3563 case SRP_OPT_TL_RETRY_COUNT:
3564 if (match_int(args, &token) || token < 2 || token > 7) {
3565 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3566 p);
3567 goto out;
3568 }
3569 target->tl_retry_count = token;
3570 break;
3571
3572 case SRP_OPT_MAX_IT_IU_SIZE:
3573 if (match_int(args, &token) || token < 0) {
3574 pr_warn("bad maximum initiator to target IU size '%s'\n", p);
3575 goto out;
3576 }
3577 target->max_it_iu_size = token;
3578 break;
3579
3580 case SRP_OPT_CH_COUNT:
3581 if (match_int(args, &token) || token < 1) {
3582 pr_warn("bad channel count %s\n", p);
3583 goto out;
3584 }
3585 target->ch_count = token;
3586 break;
3587
3588 default:
3589 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3590 p);
3591 goto out;
3592 }
3593 }
3594
3595 for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
3596 if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
3597 ret = 0;
3598 break;
3599 }
3600 }
3601 if (ret)
3602 pr_warn("target creation request is missing one or more parameters\n");
3603
3604 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3605 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3606 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3607 target->scsi_host->cmd_per_lun,
3608 target->scsi_host->can_queue);
3609
3610out:
3611 kfree(options);
3612 return ret;
3613}
3614
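/*
 * sysfs store method for the add_target attribute: allocate a SCSI host,
 * parse the option string, create and connect one RDMA channel per
 * hardware queue and register the resulting SCSI host.
 */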
3615static ssize_t add_target_store(struct device *dev,
3616 struct device_attribute *attr, const char *buf,
3617 size_t count)
3618{
3619 struct srp_host *host =
3620 container_of(dev, struct srp_host, dev);
3621 struct Scsi_Host *target_host;
3622 struct srp_target_port *target;
3623 struct srp_rdma_ch *ch;
3624 struct srp_device *srp_dev = host->srp_dev;
3625 struct ib_device *ibdev = srp_dev->dev;
3626 int ret, i, ch_idx;
3627 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3628 bool multich = false;
3629 uint32_t max_iu_len;
3630
3631 target_host = scsi_host_alloc(&srp_template,
3632 sizeof (struct srp_target_port));
3633 if (!target_host)
3634 return -ENOMEM;
3635
3636 target_host->transportt = ib_srp_transport_template;
3637 target_host->max_channel = 0;
3638 target_host->max_id = 1;
3639 target_host->max_lun = -1LL;
3640 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3641 target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
3642
3643 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
3644 target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3645
3646 target = host_to_target(target_host);
3647
3648 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
3649 target->io_class = SRP_REV16A_IB_IO_CLASS;
3650 target->scsi_host = target_host;
3651 target->srp_host = host;
3652 target->lkey = host->srp_dev->pd->local_dma_lkey;
3653 target->global_rkey = host->srp_dev->global_rkey;
3654 target->cmd_sg_cnt = cmd_sg_entries;
3655 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3656 target->allow_ext_sg = allow_ext_sg;
3657 target->tl_retry_count = 7;
3658 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3659
3660	/*
3661	 * Avoid that the SCSI host can be removed by srp_remove_target()
3662	 * before this function returns.
3663	 */
3664 scsi_host_get(target->scsi_host);
3665
3666 ret = mutex_lock_interruptible(&host->add_target_mutex);
3667 if (ret < 0)
3668 goto put;
3669
3670 ret = srp_parse_options(target->net, buf, target);
3671 if (ret)
3672 goto out;
3673
3674 if (!srp_conn_unique(target->srp_host, target)) {
3675 if (target->using_rdma_cm) {
3676 shost_printk(KERN_INFO, target->scsi_host,
3677 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
3678 be64_to_cpu(target->id_ext),
3679 be64_to_cpu(target->ioc_guid),
3680 &target->rdma_cm.dst);
3681 } else {
3682 shost_printk(KERN_INFO, target->scsi_host,
3683 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3684 be64_to_cpu(target->id_ext),
3685 be64_to_cpu(target->ioc_guid),
3686 be64_to_cpu(target->initiator_ext));
3687 }
3688 ret = -EEXIST;
3689 goto out;
3690 }
3691
3692 if (!srp_dev->has_fr && !target->allow_ext_sg &&
3693 target->cmd_sg_cnt < target->sg_tablesize) {
3694 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3695 target->sg_tablesize = target->cmd_sg_cnt;
3696 }
3697
3698 if (srp_dev->use_fast_reg) {
3699 bool gaps_reg = (ibdev->attrs.device_cap_flags &
3700 IB_DEVICE_SG_GAPS_REG);
3701
3702 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3703 (ilog2(srp_dev->mr_page_size) - 9);
3704 if (!gaps_reg) {
3705			/*
3706			 * FR can only map one HCA page per entry. If the start
3707			 * address is not aligned on a HCA page boundary two
3708			 * entries will be used for the head and the tail
3709			 * although these two entries combined contain at most
3710			 * one HCA page of data. Hence the "+ 1" in the
3711			 * calculation below.
3712			 *
3713			 * The indirect data buffer descriptor is contiguous
3714			 * so the memory for that buffer will only be
3715			 * registered if register_always is true. Hence add
3716			 * one to mr_per_cmd if register_always has been set.
3717			 */
3718 mr_per_cmd = register_always +
3719 (target->scsi_host->max_sectors + 1 +
3720 max_sectors_per_mr - 1) / max_sectors_per_mr;
3721 } else {
3722 mr_per_cmd = register_always +
3723 (target->sg_tablesize +
3724 srp_dev->max_pages_per_mr - 1) /
3725 srp_dev->max_pages_per_mr;
3726 }
3727 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3728 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3729 max_sectors_per_mr, mr_per_cmd);
3730 }
3731
3732 target_host->sg_tablesize = target->sg_tablesize;
3733 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3734 target->mr_per_cmd = mr_per_cmd;
3735 target->indirect_size = target->sg_tablesize *
3736 sizeof (struct srp_direct_buf);
3737 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
3738 srp_use_imm_data,
3739 target->max_it_iu_size);
3740
3741 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3742 INIT_WORK(&target->remove_work, srp_remove_work);
3743 spin_lock_init(&target->lock);
3744 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
3745 if (ret)
3746 goto out;
3747
3748 ret = -ENOMEM;
3749 if (target->ch_count == 0) {
3750 target->ch_count =
3751 min(ch_count ?:
3752 max(4 * num_online_nodes(),
3753 ibdev->num_comp_vectors),
3754 num_online_cpus());
3755 }
3756
3757 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3758 GFP_KERNEL);
3759 if (!target->ch)
3760 goto out;
3761
3762 for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
3763 ch = &target->ch[ch_idx];
3764 ch->target = target;
3765 ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
3766 spin_lock_init(&ch->lock);
3767 INIT_LIST_HEAD(&ch->free_tx);
3768 ret = srp_new_cm_id(ch);
3769 if (ret)
3770 goto err_disconnect;
3771
3772 ret = srp_create_ch_ib(ch);
3773 if (ret)
3774 goto err_disconnect;
3775
3776 ret = srp_connect_ch(ch, max_iu_len, multich);
3777 if (ret) {
3778 char dst[64];
3779
3780 if (target->using_rdma_cm)
3781 snprintf(dst, sizeof(dst), "%pIS",
3782 &target->rdma_cm.dst);
3783 else
3784 snprintf(dst, sizeof(dst), "%pI6",
3785 target->ib_cm.orig_dgid.raw);
3786 shost_printk(KERN_ERR, target->scsi_host,
3787 PFX "Connection %d/%d to %s failed\n",
3788 ch_idx,
3789 target->ch_count, dst);
3790 if (ch_idx == 0) {
3791 goto free_ch;
3792 } else {
3793 srp_free_ch_ib(target, ch);
3794 target->ch_count = ch - target->ch;
3795 goto connected;
3796 }
3797 }
3798 multich = true;
3799 }
3800
3801connected:
3802 target->scsi_host->nr_hw_queues = target->ch_count;
3803
3804 ret = srp_add_target(host, target);
3805 if (ret)
3806 goto err_disconnect;
3807
3808 if (target->state != SRP_TARGET_REMOVED) {
3809 if (target->using_rdma_cm) {
3810 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3811 "new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
3812 be64_to_cpu(target->id_ext),
3813 be64_to_cpu(target->ioc_guid),
3814 target->sgid.raw, &target->rdma_cm.dst);
3815 } else {
3816 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3817 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3818 be64_to_cpu(target->id_ext),
3819 be64_to_cpu(target->ioc_guid),
3820 be16_to_cpu(target->ib_cm.pkey),
3821 be64_to_cpu(target->ib_cm.service_id),
3822 target->sgid.raw,
3823 target->ib_cm.orig_dgid.raw);
3824 }
3825 }
3826
3827 ret = count;
3828
3829out:
3830 mutex_unlock(&host->add_target_mutex);
3831
3832put:
3833 scsi_host_put(target->scsi_host);
3834 if (ret < 0) {
3835		/*
3836		 * If a call to srp_remove_target() has not been scheduled,
3837		 * drop the network namespace reference now that was obtained
3838		 * earlier in this function.
3839		 */
3840 if (target->state != SRP_TARGET_REMOVED)
3841 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
3842 scsi_host_put(target->scsi_host);
3843 }
3844
3845 return ret;
3846
3847err_disconnect:
3848 srp_disconnect_target(target);
3849
3850free_ch:
3851 for (i = 0; i < target->ch_count; i++) {
3852 ch = &target->ch[i];
3853 srp_free_ch_ib(target, ch);
3854 }
3855
3856 kfree(target->ch);
3857 goto out;
3858}
3859
3860static DEVICE_ATTR_WO(add_target);
3861
3862static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
3863 char *buf)
3864{
3865 struct srp_host *host = container_of(dev, struct srp_host, dev);
3866
3867 return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3868}
3869
3870static DEVICE_ATTR_RO(ibdev);
3871
3872static ssize_t port_show(struct device *dev, struct device_attribute *attr,
3873 char *buf)
3874{
3875 struct srp_host *host = container_of(dev, struct srp_host, dev);
3876
3877 return sysfs_emit(buf, "%d\n", host->port);
3878}
3879
3880static DEVICE_ATTR_RO(port);
3881
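/*
 * Allocate and register one srp_host device, including its add_target,
 * ibdev and port sysfs attributes, for a single (HCA, port) pair.
 */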
3882static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3883{
3884 struct srp_host *host;
3885
3886 host = kzalloc(sizeof *host, GFP_KERNEL);
3887 if (!host)
3888 return NULL;
3889
3890 INIT_LIST_HEAD(&host->target_list);
3891 spin_lock_init(&host->target_lock);
3892 init_completion(&host->released);
3893 mutex_init(&host->add_target_mutex);
3894 host->srp_dev = device;
3895 host->port = port;
3896
3897 host->dev.class = &srp_class;
3898 host->dev.parent = device->dev->dev.parent;
3899 dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
3900 port);
3901
3902 if (device_register(&host->dev))
3903 goto free_host;
3904 if (device_create_file(&host->dev, &dev_attr_add_target))
3905 goto err_class;
3906 if (device_create_file(&host->dev, &dev_attr_ibdev))
3907 goto err_class;
3908 if (device_create_file(&host->dev, &dev_attr_port))
3909 goto err_class;
3910
3911 return host;
3912
3913err_class:
3914 device_unregister(&host->dev);
3915
3916free_host:
3917 kfree(host);
3918
3919 return NULL;
3920}
3921
3922static void srp_rename_dev(struct ib_device *device, void *client_data)
3923{
3924 struct srp_device *srp_dev = client_data;
3925 struct srp_host *host, *tmp_host;
3926
3927 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3928 char name[IB_DEVICE_NAME_MAX + 8];
3929
3930 snprintf(name, sizeof(name), "srp-%s-%d",
3931 dev_name(&device->dev), host->port);
3932 device_rename(&host->dev, name);
3933 }
3934}
3935
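/*
 * IB client callback, invoked once per IB device at registration time:
 * determine the memory registration parameters, allocate a protection
 * domain and add one SRP host per port.
 */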
3936static int srp_add_one(struct ib_device *device)
3937{
3938 struct srp_device *srp_dev;
3939 struct ib_device_attr *attr = &device->attrs;
3940 struct srp_host *host;
3941 int mr_page_shift;
3942 unsigned int p;
3943 u64 max_pages_per_mr;
3944 unsigned int flags = 0;
3945
3946 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3947 if (!srp_dev)
3948 return -ENOMEM;
3949
3950	/*
3951	 * Use the smallest page size supported by the HCA, down to a
3952	 * minimum of 4096 bytes. We're unlikely to build large sglists
3953	 * out of smaller entries.
3954	 */
3955 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
3956 srp_dev->mr_page_size = 1 << mr_page_shift;
3957 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3958 max_pages_per_mr = attr->max_mr_size;
3959 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3960 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3961 attr->max_mr_size, srp_dev->mr_page_size,
3962 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
3963 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3964 max_pages_per_mr);
3965
3966 srp_dev->has_fr = (attr->device_cap_flags &
3967 IB_DEVICE_MEM_MGT_EXTENSIONS);
3968 if (!never_register && !srp_dev->has_fr)
3969 dev_warn(&device->dev, "FR is not supported\n");
3970 else if (!never_register &&
3971 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
3972 srp_dev->use_fast_reg = srp_dev->has_fr;
3973
3974 if (never_register || !register_always || !srp_dev->has_fr)
3975 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3976
3977 if (srp_dev->use_fast_reg) {
3978 srp_dev->max_pages_per_mr =
3979 min_t(u32, srp_dev->max_pages_per_mr,
3980 attr->max_fast_reg_page_list_len);
3981 }
3982 srp_dev->mr_max_size = srp_dev->mr_page_size *
3983 srp_dev->max_pages_per_mr;
3984 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3985 dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
3986 attr->max_fast_reg_page_list_len,
3987 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3988
3989 INIT_LIST_HEAD(&srp_dev->dev_list);
3990
3991 srp_dev->dev = device;
3992 srp_dev->pd = ib_alloc_pd(device, flags);
3993 if (IS_ERR(srp_dev->pd)) {
3994 int ret = PTR_ERR(srp_dev->pd);
3995
3996 kfree(srp_dev);
3997 return ret;
3998 }
3999
4000 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
4001 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4002 WARN_ON_ONCE(srp_dev->global_rkey == 0);
4003 }
4004
4005 rdma_for_each_port (device, p) {
4006 host = srp_add_port(srp_dev, p);
4007 if (host)
4008 list_add_tail(&host->list, &srp_dev->dev_list);
4009 }
4010
4011 ib_set_client_data(device, &srp_client, srp_dev);
4012 return 0;
4013}
4014
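/*
 * IB client callback, invoked when an IB device is removed: unregister
 * all ports, schedule removal of all target ports and wait until the
 * removal work has finished before freeing the device.
 */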
4015static void srp_remove_one(struct ib_device *device, void *client_data)
4016{
4017 struct srp_device *srp_dev;
4018 struct srp_host *host, *tmp_host;
4019 struct srp_target_port *target;
4020
4021 srp_dev = client_data;
4022
4023 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4024 device_unregister(&host->dev);
4025		/*
4026		 * Wait for the sysfs entry to go away, so that no new
4027		 * target ports can be created.
4028		 */
4029 wait_for_completion(&host->released);
4030
4031		/*
4032		 * Remove all target ports.
4033		 */
4034 spin_lock(&host->target_lock);
4035 list_for_each_entry(target, &host->target_list, list)
4036 srp_queue_remove_work(target);
4037 spin_unlock(&host->target_lock);
4038
4039		/*
4040		 * Wait for tl_err and target port removal tasks.
4041		 */
4042 flush_workqueue(system_long_wq);
4043 flush_workqueue(srp_remove_wq);
4044
4045 kfree(host);
4046 }
4047
4048 ib_dealloc_pd(srp_dev->pd);
4049
4050 kfree(srp_dev);
4051}
4052
4053static struct srp_function_template ib_srp_transport_functions = {
4054 .has_rport_state = true,
4055 .reset_timer_if_blocked = true,
4056 .reconnect_delay = &srp_reconnect_delay,
4057 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
4058 .dev_loss_tmo = &srp_dev_loss_tmo,
4059 .reconnect = srp_rport_reconnect,
4060 .rport_delete = srp_rport_delete,
4061 .terminate_rport_io = srp_terminate_io,
4062};
4063
4064static int __init srp_init_module(void)
4065{
4066 int ret;
4067
4068 BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
4069 BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
4070 BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
4071 BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
4072 BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
4073 BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
4074 BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
4075
4076 if (srp_sg_tablesize) {
4077 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
4078 if (!cmd_sg_entries)
4079 cmd_sg_entries = srp_sg_tablesize;
4080 }
4081
4082 if (!cmd_sg_entries)
4083 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
4084
4085 if (cmd_sg_entries > 255) {
4086 pr_warn("Clamping cmd_sg_entries to 255\n");
4087 cmd_sg_entries = 255;
4088 }
4089
4090 if (!indirect_sg_entries)
4091 indirect_sg_entries = cmd_sg_entries;
4092 else if (indirect_sg_entries < cmd_sg_entries) {
4093 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
4094 cmd_sg_entries);
4095 indirect_sg_entries = cmd_sg_entries;
4096 }
4097
4098 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
4099 pr_warn("Clamping indirect_sg_entries to %u\n",
4100 SG_MAX_SEGMENTS);
4101 indirect_sg_entries = SG_MAX_SEGMENTS;
4102 }
4103
4104 srp_remove_wq = create_workqueue("srp_remove");
4105 if (!srp_remove_wq) {
4106 ret = -ENOMEM;
4107 goto out;
4108 }
4109
4110 ret = -ENOMEM;
4111 ib_srp_transport_template =
4112 srp_attach_transport(&ib_srp_transport_functions);
4113 if (!ib_srp_transport_template)
4114 goto destroy_wq;
4115
4116 ret = class_register(&srp_class);
4117 if (ret) {
4118 pr_err("couldn't register class infiniband_srp\n");
4119 goto release_tr;
4120 }
4121
4122 ib_sa_register_client(&srp_sa_client);
4123
4124 ret = ib_register_client(&srp_client);
4125 if (ret) {
4126 pr_err("couldn't register IB client\n");
4127 goto unreg_sa;
4128 }
4129
4130out:
4131 return ret;
4132
4133unreg_sa:
4134 ib_sa_unregister_client(&srp_sa_client);
4135 class_unregister(&srp_class);
4136
4137release_tr:
4138 srp_release_transport(ib_srp_transport_template);
4139
4140destroy_wq:
4141 destroy_workqueue(srp_remove_wq);
4142 goto out;
4143}
4144
4145static void __exit srp_cleanup_module(void)
4146{
4147 ib_unregister_client(&srp_client);
4148 ib_sa_unregister_client(&srp_sa_client);
4149 class_unregister(&srp_class);
4150 srp_release_transport(ib_srp_transport_template);
4151 destroy_workqueue(srp_remove_wq);
4152}
4153
4154module_init(srp_init_module);
4155module_exit(srp_cleanup_module);
4156