#include <linux/types.h>
#include <linux/list.h>
#include <linux/hardirq.h>
#include <linux/kfifo.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <net/sock.h>

#include <linux/uaccess.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>

#include "iscsi_iser.h"

MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");

static struct scsi_host_template iscsi_iser_sht;
static struct iscsi_transport iscsi_iser_transport;
static struct scsi_transport_template *iscsi_iser_scsi_transport;
static struct workqueue_struct *release_wq;
static DEFINE_MUTEX(unbind_iser_conn_mutex);
struct iser_global ig;

int iser_debug_level = 0;
module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");

static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session (default:512)");

unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024)");

bool iser_always_reg = true;
module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
MODULE_PARM_DESC(always_register,
		 "Always register memory, even for continuous memory regions (default:true)");

bool iser_pi_enable = false;
module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");

int iser_pi_guard;
module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO);
MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");

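/**
 * iscsi_iser_recv() - Process a successful recv completion
 * @conn:         iscsi connection
 * @hdr:          iscsi header
 * @rx_data:      buffer containing receive data payload
 * @rx_data_len:  length of rx_data
 *
 * Notes: In case of data length errors or iscsi PDU completion failures
 *        this routine will signal the iscsi layer of connection failure.
 */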
void
iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		char *rx_data, int rx_data_len)
{
	int rc = 0;
	int datalen;

	datalen = ntoh24(hdr->dlength);
	if (datalen > rx_data_len || (datalen + 4) < rx_data_len) {
		iser_err("wrong datalen %d (hdr), %d (IB)\n",
			 datalen, rx_data_len);
		rc = ISCSI_ERR_DATALEN;
		goto error;
	}

	if (datalen != rx_data_len)
		iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
			 datalen, rx_data_len);

	rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
	if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
		goto error;

	return;
error:
	iscsi_conn_failure(conn, rc);
}

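/**
 * iscsi_iser_pdu_alloc() - allocate an iscsi-iser PDU descriptor
 * @task:    iscsi task
 * @opcode:  iscsi command opcode
 *
 * Notes: This routine can't fail; it simply points the task header at
 *        the iser tx descriptor's iscsi header and records its size.
 */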
static int
iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_iser_task *iser_task = task->dd_data;

	task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header;
	task->hdr_max = sizeof(iser_task->desc.iscsi_header);

	return 0;
}

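/**
 * iser_initialize_task_headers() - Initialize task headers
 * @task:     iscsi task
 * @tx_desc:  iser tx descriptor
 *
 * Notes: The connection state mutex is taken only for mgmt tasks (no scsi
 *        command and not in interrupt context), since the iscsi layer may
 *        issue those while connection teardown is in progress. The tx
 *        descriptor headers are DMA-mapped here and unmapped in
 *        iscsi_iser_cleanup_task().
 */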
int
iser_initialize_task_headers(struct iscsi_task *task,
			     struct iser_tx_desc *tx_desc)
{
	struct iser_conn *iser_conn = task->conn->dd_data;
	struct iser_device *device = iser_conn->ib_conn.device;
	struct iscsi_iser_task *iser_task = task->dd_data;
	u64 dma_addr;
	const bool mgmt_task = !task->sc && !in_interrupt();
	int ret = 0;

	if (unlikely(mgmt_task))
		mutex_lock(&iser_conn->state_mutex);

	if (unlikely(iser_conn->state != ISER_CONN_UP)) {
		ret = -ENODEV;
		goto out;
	}

	dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(device->ib_device, dma_addr)) {
		ret = -ENOMEM;
		goto out;
	}

	tx_desc->inv_wr.next = NULL;
	tx_desc->reg_wr.wr.next = NULL;
	tx_desc->mapped = true;
	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	iser_task->iser_conn = iser_conn;
out:
	if (unlikely(mgmt_task))
		mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

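/**
 * iscsi_iser_task_init() - Initialize iscsi-iser task
 * @task: iscsi task
 *
 * Initialize the task for the scsi command or mgmt command.
 *
 * Return: zero on success, or a negative errno when initializing the task
 *         headers fails (connection not up or dma mapping error).
 */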
static int
iscsi_iser_task_init(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	int ret;

	ret = iser_initialize_task_headers(task, &iser_task->desc);
	if (ret) {
		iser_err("Failed to init task %p, err = %d\n",
			 iser_task, ret);
		return ret;
	}

	if (!task->sc)
		return 0;

	iser_task->command_sent = 0;
	iser_task_rdma_init(iser_task);
	iser_task->sc = task->sc;

	return 0;
}

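/**
 * iscsi_iser_mtask_xmit() - xmit management (immediate) task
 * @conn: iscsi connection
 * @task: task management task
 *
 * Notes: The function can return -EAGAIN, in which case the caller must
 *        call it again later, or recover. A '0' return code means a
 *        successful xmit.
 */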
static int
iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
	int error = 0;

	iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);

	error = iser_send_control(conn, task);

	return error;
}

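/*
 * Transmit any unsolicited data-out PDUs the task still owes, rolling back
 * the DataSN on a send failure so the PDU can be retried later.
 */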
static int
iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
				struct iscsi_task *task)
{
	struct iscsi_r2t_info *r2t = &task->unsol_r2t;
	struct iscsi_data hdr;
	int error = 0;

	while (iscsi_task_has_unsol_data(task)) {
		iscsi_prep_data_out_pdu(task, r2t, &hdr);
		iser_dbg("Sending data-out: itt 0x%x, data count %d\n",
			 hdr.itt, r2t->data_count);

		error = iser_send_data_out(conn, task, &hdr);
		if (error) {
			r2t->datasn--;
			goto iscsi_iser_task_xmit_unsol_data_exit;
		}
		r2t->sent += r2t->data_count;
		iser_dbg("Need to send %d more as data-out PDUs\n",
			 r2t->data_length - r2t->sent);
	}

iscsi_iser_task_xmit_unsol_data_exit:
	return error;
}

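/**
 * iscsi_iser_task_xmit() - xmit iscsi-iser task
 * @task: iscsi task
 *
 * Sends the command PDU (once) and any pending unsolicited data-out PDUs.
 *
 * Return: zero on success, or the error returned by the send path.
 */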
static int
iscsi_iser_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_iser_task *iser_task = task->dd_data;
	int error = 0;

	if (!task->sc)
		return iscsi_iser_mtask_xmit(conn, task);

	if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
		BUG_ON(scsi_bufflen(task->sc) == 0);

		iser_dbg("cmd [itt %x total %d imm %d unsol_data %d\n",
			 task->itt, scsi_bufflen(task->sc),
			 task->imm_count, task->unsol_r2t.data_length);
	}

	iser_dbg("ctask xmit [cid %d itt 0x%x]\n",
		 conn->id, task->itt);

	if (!iser_task->command_sent) {
		error = iser_send_command(conn, task);
		if (error)
			goto iscsi_iser_task_xmit_exit;
		iser_task->command_sent = 1;
	}

	if (iscsi_task_has_unsol_data(task))
		error = iscsi_iser_task_xmit_unsol_data(conn, task);

iscsi_iser_task_xmit_exit:
	return error;
}

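/**
 * iscsi_iser_cleanup_task() - cleanup an iscsi-iser task
 * @task: iscsi task
 *
 * Notes: In case the RDMA device is already NULL (it might have been
 *        removed in a DEVICE_REMOVAL CM event) this routine bails out
 *        without doing dma unmapping.
 */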
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct iser_conn *iser_conn = task->conn->dd_data;
	struct iser_device *device = iser_conn->ib_conn.device;

	if (!device)
		return;

	if (likely(tx_desc->mapped)) {
		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->mapped = false;
	}

	if (!task->sc)
		return;

	if (iser_task->status == ISER_TASK_STATUS_STARTED) {
		iser_task->status = ISER_TASK_STATUS_COMPLETED;
		iser_task_rdma_finalize(iser_task);
	}
}

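/**
 * iscsi_iser_check_protection() - check protection information status of task.
 * @task:    iscsi task
 * @sector:  error sector if one exists (output)
 *
 * Return: the T10-PI status reported by iser_check_task_pi_status() for the
 *         task's data direction; on a data-integrity error *sector is set
 *         to the failing sector.
 */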
static u8
iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
{
	struct iscsi_iser_task *iser_task = task->dd_data;
	enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ?
					ISER_DIR_IN : ISER_DIR_OUT;

	return iser_check_task_pi_status(iser_task, dir, sector);
}

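/**
 * iscsi_iser_conn_create() - create a new iscsi-iser connection
 * @cls_session: iscsi class session
 * @conn_idx:    connection index within the session (for MCS)
 *
 * Return: iscsi_cls_conn when iscsi_conn_setup succeeds or NULL
 *         otherwise.
 */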
static struct iscsi_cls_conn *
iscsi_iser_conn_create(struct iscsi_cls_session *cls_session,
		       uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;

	cls_conn = iscsi_conn_setup(cls_session, 0, conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;

	conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN;

	return cls_conn;
}

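/**
 * iscsi_iser_conn_bind() - bind iscsi and iser connection structures
 * @cls_session:    iscsi class session
 * @cls_conn:       iscsi class connection
 * @transport_eph:  transport end-point handle
 * @is_leading:     indicate if this is the session leading connection (MCS)
 *
 * Return: zero on success, the error from iscsi_conn_bind() if it fails,
 *         or -EINVAL in case the end-point doesn't exist anymore or the
 *         iser connection state is not UP (teardown already started).
 */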
static int
iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
		     struct iscsi_cls_conn *cls_conn,
		     uint64_t transport_eph,
		     int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iser_conn *iser_conn;
	struct iscsi_endpoint *ep;
	int error;

	error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (error)
		return error;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep) {
		iser_err("can't bind eph %llx\n",
			 (unsigned long long)transport_eph);
		return -EINVAL;
	}
	iser_conn = ep->dd_data;

	mutex_lock(&iser_conn->state_mutex);
	if (iser_conn->state != ISER_CONN_UP) {
		error = -EINVAL;
		iser_err("iser_conn %p state is %d, teardown started\n",
			 iser_conn, iser_conn->state);
		goto out;
	}

	error = iser_alloc_rx_descriptors(iser_conn, conn->session);
	if (error)
		goto out;

	iser_info("binding iscsi conn %p to iser_conn %p\n", conn, iser_conn);

	conn->dd_data = iser_conn;
	iser_conn->iscsi_conn = conn;

out:
	mutex_unlock(&iser_conn->state_mutex);
	return error;
}

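/**
 * iscsi_iser_conn_start() - start iscsi-iser connection
 * @cls_conn: iscsi class connection
 *
 * Notes: Re-arms stop_completion; from this point the iscsi layer will call
 *        conn_stop during session/connection teardown, so the iser transport
 *        must wait for it.
 */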
static int
iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *iscsi_conn;
	struct iser_conn *iser_conn;

	iscsi_conn = cls_conn->dd_data;
	iser_conn = iscsi_conn->dd_data;
	reinit_completion(&iser_conn->stop_completion);

	return iscsi_conn_start(cls_conn);
}

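/**
 * iscsi_iser_conn_stop() - stop iscsi-iser connection
 * @cls_conn:  iscsi class connection
 * @flag:      indicate if recover or terminate (passed as is)
 *
 * Notes: Connection termination, iscsi_conn_stop() and the unbinding of the
 *        iser connection are done under the iser state mutex, so teardown
 *        cannot race with an RDMA device removal event.
 */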
static void
iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iser_conn *iser_conn = conn->dd_data;

	iser_info("stopping iscsi_conn: %p, iser_conn: %p\n", conn, iser_conn);

	if (iser_conn) {
		mutex_lock(&iser_conn->state_mutex);
		mutex_lock(&unbind_iser_conn_mutex);
		iser_conn_terminate(iser_conn);
		iscsi_conn_stop(cls_conn, flag);

		iser_conn->iscsi_conn = NULL;
		conn->dd_data = NULL;
		mutex_unlock(&unbind_iser_conn_mutex);

		complete(&iser_conn->stop_completion);
		mutex_unlock(&iser_conn->state_mutex);
	} else {
		iscsi_conn_stop(cls_conn, flag);
	}
}

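/**
 * iscsi_iser_session_destroy() - destroy iscsi-iser session
 * @cls_session: iscsi class session
 *
 * Tears down the iscsi session and removes and frees the scsi host.
 */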
static void
iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	iscsi_session_teardown(cls_session);
	iscsi_host_remove(shost);
	iscsi_host_free(shost);
}

static inline unsigned int
iser_dif_prot_caps(int prot_caps)
{
	return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ?
		SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE0_PROTECTION |
		SHOST_DIX_TYPE1_PROTECTION : 0) |
	       ((prot_caps & IB_PROT_T10DIF_TYPE_2) ?
		SHOST_DIF_TYPE2_PROTECTION | SHOST_DIX_TYPE2_PROTECTION : 0) |
	       ((prot_caps & IB_PROT_T10DIF_TYPE_3) ?
		SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE3_PROTECTION : 0);
}

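/**
 * iscsi_iser_session_create() - create an iscsi-iser session
 * @ep:             iscsi end-point handle
 * @cmds_max:       maximum commands in this session
 * @qdepth:         session command queue depth
 * @initial_cmdsn:  initiator command sequence number
 *
 * Allocates and adds a scsi host, expands DIF supported capabilities
 * and sets up an iscsi session.
 */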
static struct iscsi_cls_session *
iscsi_iser_session_create(struct iscsi_endpoint *ep,
			  uint16_t cmds_max, uint16_t qdepth,
			  uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_session;
	struct Scsi_Host *shost;
	struct iser_conn *iser_conn = NULL;
	struct ib_conn *ib_conn;
	struct ib_device *ib_dev;
	u32 max_fr_sectors;

	shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_iser_scsi_transport;
	shost->cmd_per_lun = qdepth;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = 16;

	if (ep) {
		iser_conn = ep->dd_data;
		shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
		shost->can_queue = min_t(u16, cmds_max, iser_conn->max_cmds);

		mutex_lock(&iser_conn->state_mutex);
		if (iser_conn->state != ISER_CONN_UP) {
			iser_err("iser conn %p already started teardown\n",
				 iser_conn);
			mutex_unlock(&iser_conn->state_mutex);
			goto free_host;
		}

		ib_conn = &iser_conn->ib_conn;
		ib_dev = ib_conn->device->ib_device;
		if (ib_conn->pi_support) {
			u32 sig_caps = ib_dev->attrs.sig_prot_cap;

			shost->sg_prot_tablesize = shost->sg_tablesize;
			scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
			scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
						   SHOST_DIX_GUARD_CRC);
		}

		if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
			shost->virt_boundary_mask = SZ_4K - 1;

		if (iscsi_host_add(shost, ib_dev->dev.parent)) {
			mutex_unlock(&iser_conn->state_mutex);
			goto free_host;
		}
		mutex_unlock(&iser_conn->state_mutex);
	} else {
		shost->can_queue = min_t(u16, cmds_max, ISER_DEF_XMIT_CMDS_MAX);
		if (iscsi_host_add(shost, NULL))
			goto free_host;
	}

	max_fr_sectors = (shost->sg_tablesize * PAGE_SIZE) >> 9;
	shost->max_sectors = min(iser_max_sectors, max_fr_sectors);

	iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
		 iser_conn, shost->sg_tablesize,
		 shost->max_sectors);

	if (shost->max_sectors < iser_max_sectors)
		iser_warn("max_sectors was reduced from %u to %u\n",
			  iser_max_sectors, shost->max_sectors);

	cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
					  shost->can_queue, 0,
					  sizeof(struct iscsi_iser_task),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;

	return cls_session;

remove_host:
	iscsi_host_remove(shost);
free_host:
	iscsi_host_free(shost);
	return NULL;
}

static int
iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
		     enum iscsi_param param, char *buf, int buflen)
{
	int value;

	switch (param) {
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		break;
	case ISCSI_PARAM_HDRDGST_EN:
		sscanf(buf, "%d", &value);
		if (value) {
			iser_err("HeaderDigest wasn't negotiated to None\n");
			return -EPROTO;
		}
		break;
	case ISCSI_PARAM_DATADGST_EN:
		sscanf(buf, "%d", &value);
		if (value) {
			iser_err("DataDigest wasn't negotiated to None\n");
			return -EPROTO;
		}
		break;
	case ISCSI_PARAM_IFMARKER_EN:
		sscanf(buf, "%d", &value);
		if (value) {
			iser_err("IFMarker wasn't negotiated to No\n");
			return -EPROTO;
		}
		break;
	case ISCSI_PARAM_OFMARKER_EN:
		sscanf(buf, "%d", &value);
		if (value) {
			iser_err("OFMarker wasn't negotiated to No\n");
			return -EPROTO;
		}
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}

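/**
 * iscsi_iser_conn_get_stats() - get iscsi connection statistics
 * @cls_conn:  iscsi class connection
 * @stats:     iscsi stats to output
 *
 * Output connection statistics.
 */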
static void
iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->custom_length = 0;
}

static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
				   enum iscsi_param param, char *buf)
{
	struct iser_conn *iser_conn = ep->dd_data;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!iser_conn || !iser_conn->ib_conn.cma_id)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
				&iser_conn->ib_conn.cma_id->route.addr.dst_addr,
				param, buf);
	default:
		break;
	}
	return -ENOSYS;
}

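/**
 * iscsi_iser_ep_connect() - establish an iSER RDMA connection
 * @shost:         scsi_host
 * @dst_addr:      destination address
 * @non_blocking:  indicate if routine can block
 *
 * Allocate an iscsi endpoint and an iser_conn structure, bind them and
 * start RDMA connection establishment via rdma_cm. The iser_conn is not
 * embedded in the iscsi_endpoint since the endpoint is destroyed at
 * ep_disconnect while iser_conn may release its resources asynchronously.
 *
 * Return: iscsi_endpoint created by the iscsi layer or ERR_PTR(error)
 *         on failure.
 */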
static struct iscsi_endpoint *
iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
		      int non_blocking)
{
	int err;
	struct iser_conn *iser_conn;
	struct iscsi_endpoint *ep;

	ep = iscsi_create_endpoint(0);
	if (!ep)
		return ERR_PTR(-ENOMEM);

	iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
	if (!iser_conn) {
		err = -ENOMEM;
		goto failure;
	}

	ep->dd_data = iser_conn;
	iser_conn->ep = ep;
	iser_conn_init(iser_conn);

	err = iser_connect(iser_conn, NULL, dst_addr, non_blocking);
	if (err)
		goto failure;

	return ep;
failure:
	iscsi_destroy_endpoint(ep);
	return ERR_PTR(err);
}

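/**
 * iscsi_iser_ep_poll() - poll for iser connection establishment to complete
 * @ep:          iscsi endpoint (created at ep_connect)
 * @timeout_ms:  polling timeout allowed in ms.
 *
 * Waits on up_completion, which is signaled once the cma_id gets a
 * CONNECTED event.
 *
 * Return: 1 if connection establishment succeeded, 0 if the timeout expired
 *         (libiscsi will retry), or a negative value if interrupted by a
 *         signal or if the iser connection transitioned to TERMINATING or
 *         DOWN during the wait.
 */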
static int
iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct iser_conn *iser_conn = ep->dd_data;
	int rc;

	rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion,
						       msecs_to_jiffies(timeout_ms));

	if (rc == 0) {
		mutex_lock(&iser_conn->state_mutex);
		if (iser_conn->state == ISER_CONN_TERMINATING ||
		    iser_conn->state == ISER_CONN_DOWN)
			rc = -1;
		mutex_unlock(&iser_conn->state_mutex);
	}

	iser_info("iser conn %p rc = %d\n", iser_conn, rc);

	if (rc > 0)
		return 1;
	else if (!rc)
		return 0;
	else
		return rc;
}

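/**
 * iscsi_iser_ep_disconnect() - Initiate connection teardown process
 * @ep: iscsi endpoint handle
 *
 * This routine does not block on iser/RDMA termination: if the connection
 * was bound to an iscsi connection it queues deferred release work,
 * otherwise it releases the iser connection immediately, and then destroys
 * the iscsi endpoint.
 */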
static void
iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct iser_conn *iser_conn = ep->dd_data;

	iser_info("ep %p iser conn %p\n", ep, iser_conn);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn_terminate(iser_conn);

	if (iser_conn->iscsi_conn) {
		INIT_WORK(&iser_conn->release_work, iser_release_work);
		queue_work(release_wq, &iser_conn->release_work);
		mutex_unlock(&iser_conn->state_mutex);
	} else {
		iser_conn->state = ISER_CONN_DOWN;
		mutex_unlock(&iser_conn->state_mutex);
		iser_conn_release(iser_conn);
	}

	iscsi_destroy_endpoint(ep);
}

static umode_t iser_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
		case ISCSI_PARAM_DISCOVERY_SESS:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

static struct scsi_host_template iscsi_iser_sht = {
	.module			= THIS_MODULE,
	.name			= "iSCSI Initiator over iSER",
	.queuecommand		= iscsi_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.sg_tablesize		= ISCSI_ISER_DEF_SG_TABLESIZE,
	.cmd_per_lun		= ISER_DEF_CMD_PER_LUN,
	.eh_timed_out		= iscsi_eh_cmd_timed_out,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc		= iscsi_target_alloc,
	.proc_name		= "iscsi_iser",
	.this_id		= -1,
	.track_queue_depth	= 1,
};

static struct iscsi_transport iscsi_iser_transport = {
	.owner			= THIS_MODULE,
	.name			= "iser",
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO,

	.create_session		= iscsi_iser_session_create,
	.destroy_session	= iscsi_iser_session_destroy,

	.create_conn		= iscsi_iser_conn_create,
	.bind_conn		= iscsi_iser_conn_bind,
	.destroy_conn		= iscsi_conn_teardown,
	.attr_is_visible	= iser_attr_is_visible,
	.set_param		= iscsi_iser_set_param,
	.get_conn_param		= iscsi_conn_get_param,
	.get_ep_param		= iscsi_iser_get_ep_param,
	.get_session_param	= iscsi_session_get_param,
	.start_conn		= iscsi_iser_conn_start,
	.stop_conn		= iscsi_iser_conn_stop,

	.get_host_param		= iscsi_host_get_param,
	.set_host_param		= iscsi_host_set_param,

	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_iser_conn_get_stats,
	.init_task		= iscsi_iser_task_init,
	.xmit_task		= iscsi_iser_task_xmit,
	.cleanup_task		= iscsi_iser_cleanup_task,
	.alloc_pdu		= iscsi_iser_pdu_alloc,
	.check_protection	= iscsi_iser_check_protection,

	.session_recovery_timedout = iscsi_session_recovery_timedout,

	.ep_connect		= iscsi_iser_ep_connect,
	.ep_poll		= iscsi_iser_ep_poll,
	.ep_disconnect		= iscsi_iser_ep_disconnect
};

static int __init iser_init(void)
{
	int err;

	iser_dbg("Starting iSER datamover...\n");

	if (iscsi_max_lun < 1) {
		iser_err("Invalid max_lun value of %u\n", iscsi_max_lun);
		return -EINVAL;
	}

	memset(&ig, 0, sizeof(struct iser_global));

	ig.desc_cache = kmem_cache_create("iser_descriptors",
					  sizeof(struct iser_tx_desc),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	if (ig.desc_cache == NULL)
		return -ENOMEM;

	mutex_init(&ig.device_list_mutex);
	INIT_LIST_HEAD(&ig.device_list);
	mutex_init(&ig.connlist_mutex);
	INIT_LIST_HEAD(&ig.connlist);

	release_wq = alloc_workqueue("release workqueue", 0, 0);
	if (!release_wq) {
		iser_err("failed to allocate release workqueue\n");
		err = -ENOMEM;
		goto err_alloc_wq;
	}

	iscsi_iser_scsi_transport = iscsi_register_transport(
						&iscsi_iser_transport);
	if (!iscsi_iser_scsi_transport) {
		iser_err("iscsi_register_transport failed\n");
		err = -EINVAL;
		goto err_reg;
	}

	return 0;

err_reg:
	destroy_workqueue(release_wq);
err_alloc_wq:
	kmem_cache_destroy(ig.desc_cache);

	return err;
}

static void __exit iser_exit(void)
{
	struct iser_conn *iser_conn, *n;
	int connlist_empty;

	iser_dbg("Removing iSER datamover...\n");
	destroy_workqueue(release_wq);

	mutex_lock(&ig.connlist_mutex);
	connlist_empty = list_empty(&ig.connlist);
	mutex_unlock(&ig.connlist_mutex);

	if (!connlist_empty) {
		iser_err("Error cleanup stage completed but we still have iser "
			 "connections, destroying them anyway\n");
		list_for_each_entry_safe(iser_conn, n, &ig.connlist,
					 conn_list) {
			iser_conn_release(iser_conn);
		}
	}

	iscsi_unregister_transport(&iscsi_iser_transport);
	kmem_cache_destroy(ig.desc_cache);
}

module_init(iser_init);
module_exit(iser_exit);