/*
 * iSCSI session, connection and endpoint management for the Broadcom
 * NetXtreme II (bnx2i) offload iSCSI driver.
 */
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libiscsi.h>
#include "bnx2i.h"

struct scsi_transport_template *bnx2i_scsi_xport_template;
struct iscsi_transport bnx2i_iscsi_transport;
static struct scsi_host_template bnx2i_host_template;

/* protects shared endpoint and connection resources during teardown */
static DEFINE_SPINLOCK(bnx2i_resc_lock);

DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);

/* bnx2i_adapter_ready - check that the adapter is up, not going down, and has link */
static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
{
        int retval = 0;

        if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
            test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
            test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
                retval = -EPERM;
        return retval;
}

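/*
 * bnx2i_get_write_cmd_bd_idx - identify the BD table index and the offset
 * within that BD which correspond to a given byte offset into the command's
 * data buffer.
 */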
static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
                                       u32 *start_bd_off, u32 *start_bd_idx)
{
        struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
        u32 cur_offset = 0;
        u32 cur_bd_idx = 0;

        if (buf_off) {
                while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
                        cur_offset += bd_tbl->buffer_length;
                        cur_bd_idx++;
                        bd_tbl++;
                }
        }

        *start_bd_off = buf_off - cur_offset;
        *start_bd_idx = cur_bd_idx;
}

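/*
 * bnx2i_setup_write_cmd_bd_info - set up the BD index/offset of the first
 * unsolicited and solicited data segment for a WRITE command, skipping any
 * immediate data carried by the command PDU itself.
 */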
static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
{
        struct bnx2i_cmd *cmd = task->dd_data;
        u32 start_bd_offset;
        u32 start_bd_idx;
        u32 buffer_offset = 0;
        u32 cmd_len = cmd->req.total_data_transfer_length;

        /* if there is neither unsolicited nor immediate data there is
         * nothing to set up for this WRITE command.
         */
        if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
                return;

        /* immediate data is sent with the command PDU itself */
        buffer_offset += task->imm_count;
        if (task->imm_count == cmd_len)
                return;

        if (iscsi_task_has_unsol_data(task)) {
                bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
                                           &start_bd_offset, &start_bd_idx);
                cmd->req.ud_buffer_offset = start_bd_offset;
                cmd->req.ud_start_bd_index = start_bd_idx;
                buffer_offset += task->unsol_r2t.data_length;
        }

        if (buffer_offset != cmd_len) {
                bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
                                           &start_bd_offset, &start_bd_idx);
                if ((start_bd_offset > task->conn->session->first_burst) ||
                    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
                        int i = 0;

                        iscsi_conn_printk(KERN_ALERT, task->conn,
                                          "bnx2i- error, buf offset 0x%x "
                                          "bd_valid %d use_sg %d\n",
                                          buffer_offset, cmd->io_tbl.bd_valid,
                                          scsi_sg_count(cmd->scsi_cmd));
                        for (i = 0; i < cmd->io_tbl.bd_valid; i++)
                                iscsi_conn_printk(KERN_ALERT, task->conn,
                                                  "bnx2i err, bd[%d]: len %x\n",
                                                  i, cmd->io_tbl.bd_tbl[i].buffer_length);
                }
                cmd->req.sd_buffer_offset = start_bd_offset;
                cmd->req.sd_start_bd_index = start_bd_idx;
        }
}

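/*
 * bnx2i_map_scsi_sg - DMA-map the command's scatter-gather list and convert
 * each mapped segment into a buffer descriptor (BD) table entry, marking the
 * first and last entries of the chain.
 */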
static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
{
        struct scsi_cmnd *sc = cmd->scsi_cmd;
        struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
        struct scatterlist *sg;
        int byte_count = 0;
        int bd_count = 0;
        int sg_count;
        int sg_len;
        u64 addr;
        int i;

        BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);

        sg_count = scsi_dma_map(sc);

        scsi_for_each_sg(sc, sg, sg_count, i) {
                sg_len = sg_dma_len(sg);
                addr = (u64) sg_dma_address(sg);
                bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
                bd[bd_count].buffer_addr_hi = addr >> 32;
                bd[bd_count].buffer_length = sg_len;
                bd[bd_count].flags = 0;
                if (bd_count == 0)
                        bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;

                byte_count += sg_len;
                bd_count++;
        }

        if (bd_count)
                bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;

        BUG_ON(byte_count != scsi_bufflen(sc));
        return bd_count;
}

static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
{
        int bd_count;

        bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
        if (!bd_count) {
                struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;

                bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
                bd[0].buffer_length = bd[0].flags = 0;
        }
        cmd->io_tbl.bd_valid = bd_count;
}

/* bnx2i_iscsi_unmap_sg_list - unmap the command's SG list, if one was mapped */
void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
{
        struct scsi_cmnd *sc = cmd->scsi_cmd;

        if (cmd->io_tbl.bd_valid && sc) {
                scsi_dma_unmap(sc);
                cmd->io_tbl.bd_valid = 0;
        }
}

static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
{
        memset(&cmd->req, 0x00, sizeof(cmd->req));
        cmd->req.op_code = 0xFF;
        cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
        cmd->req.bd_list_addr_hi =
                (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
}

/*
 * bnx2i_bind_conn_to_iscsi_cid - bind a connection to the given iSCSI CID so
 * that completions arriving for that CID can be routed back to the conn.
 */
static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
                                        struct bnx2i_conn *bnx2i_conn,
                                        u32 iscsi_cid)
{
        if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
                iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
                                  "conn bind - entry #%d not free\n", iscsi_cid);
                return -EBUSY;
        }

        hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
        return 0;
}

/* bnx2i_get_conn_from_id - look up the connection bound to an iSCSI CID */
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
                                          u16 iscsi_cid)
{
        if (!hba->cid_que.conn_cid_tbl) {
                printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
                return NULL;

        } else if (iscsi_cid >= hba->max_active_conns) {
                printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
                return NULL;
        }
        return hba->cid_que.conn_cid_tbl[iscsi_cid];
}

/* bnx2i_alloc_iscsi_cid - allocate an iSCSI CID from the free CID queue */
static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
{
        int idx;

        if (!hba->cid_que.cid_free_cnt)
                return -1;

        idx = hba->cid_que.cid_q_cons_idx;
        hba->cid_que.cid_q_cons_idx++;
        if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
                hba->cid_que.cid_q_cons_idx = 0;

        hba->cid_que.cid_free_cnt--;
        return hba->cid_que.cid_que[idx];
}

/* bnx2i_free_iscsi_cid - return an iSCSI CID to the free CID queue */
static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
{
        int idx;

        if (iscsi_cid == (u16) -1)
                return;

        hba->cid_que.cid_free_cnt++;

        idx = hba->cid_que.cid_q_prod_idx;
        hba->cid_que.cid_que[idx] = iscsi_cid;
        hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
        hba->cid_que.cid_q_prod_idx++;
        if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
                hba->cid_que.cid_q_prod_idx = 0;
}

/*
 * bnx2i_setup_free_cid_que - allocate and initialize the free CID queue and
 * the conn<->cid lookup table for this adapter.
 */
static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
{
        int mem_size;
        int i;

        mem_size = hba->max_active_conns * sizeof(u32);
        mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;

        hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
        if (!hba->cid_que.cid_que_base)
                return -ENOMEM;

        mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
        mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
        hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
        if (!hba->cid_que.conn_cid_tbl) {
                kfree(hba->cid_que.cid_que_base);
                hba->cid_que.cid_que_base = NULL;
                return -ENOMEM;
        }

        hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
        hba->cid_que.cid_q_prod_idx = 0;
        hba->cid_que.cid_q_cons_idx = 0;
        hba->cid_que.cid_q_max_idx = hba->max_active_conns;
        hba->cid_que.cid_free_cnt = hba->max_active_conns;

        for (i = 0; i < hba->max_active_conns; i++) {
                hba->cid_que.cid_que[i] = i;
                hba->cid_que.conn_cid_tbl[i] = NULL;
        }
        return 0;
}

static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
{
        kfree(hba->cid_que.cid_que_base);
        hba->cid_que.cid_que_base = NULL;

        kfree(hba->cid_que.conn_cid_tbl);
        hba->cid_que.conn_cid_tbl = NULL;
}

/*
 * bnx2i_alloc_ep - allocate and initialize an iscsi_endpoint (with the bnx2i
 * private area); the endpoint tracks one offloaded TCP connection.
 */
static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
{
        struct iscsi_endpoint *ep;
        struct bnx2i_endpoint *bnx2i_ep;
        u32 ec_div;

        ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
        if (!ep) {
                printk(KERN_ERR "bnx2i: Could not allocate ep\n");
                return NULL;
        }

        bnx2i_ep = ep->dd_data;
        bnx2i_ep->cls_ep = ep;
        INIT_LIST_HEAD(&bnx2i_ep->link);
        bnx2i_ep->state = EP_STATE_IDLE;
        bnx2i_ep->ep_iscsi_cid = (u16) -1;
        bnx2i_ep->hba = hba;
        bnx2i_ep->hba_age = hba->age;

        ec_div = event_coal_div;
        while (ec_div >>= 1)
                bnx2i_ep->ec_shift += 1;

        hba->ofld_conns_active++;
        init_waitqueue_head(&bnx2i_ep->ofld_wait);
        return ep;
}

/* bnx2i_free_ep - release the CID and per-endpoint state, then free @ep */
static void bnx2i_free_ep(struct iscsi_endpoint *ep)
{
        struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
        unsigned long flags;

        spin_lock_irqsave(&bnx2i_resc_lock, flags);
        bnx2i_ep->state = EP_STATE_IDLE;
        bnx2i_ep->hba->ofld_conns_active--;

        if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
                bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);

        if (bnx2i_ep->conn) {
                bnx2i_ep->conn->ep = NULL;
                bnx2i_ep->conn = NULL;
        }

        bnx2i_ep->hba = NULL;
        spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
        iscsi_destroy_endpoint(ep);
}

/* bnx2i_alloc_bdt - allocate a per-command buffer descriptor (BD) table */
static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
                           struct bnx2i_cmd *cmd)
{
        struct io_bdt *io = &cmd->io_tbl;
        struct iscsi_bd *bd;

        io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
                                        ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
                                        &io->bd_tbl_dma, GFP_KERNEL);
        if (!io->bd_tbl) {
                iscsi_session_printk(KERN_ERR, session, "Could not "
                                     "allocate bdt.\n");
                return -ENOMEM;
        }
        io->bd_valid = 0;
        return 0;
}

static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
                                   struct iscsi_session *session)
{
        int i;

        for (i = 0; i < session->cmds_max; i++) {
                struct iscsi_task *task = session->cmds[i];
                struct bnx2i_cmd *cmd = task->dd_data;

                if (cmd->io_tbl.bd_tbl)
                        dma_free_coherent(&hba->pcidev->dev,
                                          ISCSI_MAX_BDS_PER_CMD *
                                          sizeof(struct iscsi_bd),
                                          cmd->io_tbl.bd_tbl,
                                          cmd->io_tbl.bd_tbl_dma);
        }
}

/* bnx2i_setup_cmd_pool - allocate a BD table for every command in the session */
static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
                                struct iscsi_session *session)
{
        int i;

        for (i = 0; i < session->cmds_max; i++) {
                struct iscsi_task *task = session->cmds[i];
                struct bnx2i_cmd *cmd = task->dd_data;

                task->hdr = &cmd->hdr;
                task->hdr_max = sizeof(struct iscsi_hdr);

                if (bnx2i_alloc_bdt(hba, session, cmd))
                        goto free_bdts;
        }

        return 0;

free_bdts:
        bnx2i_destroy_cmd_pool(hba, session);
        return -ENOMEM;
}

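/*
 * bnx2i_setup_mp_bdt - allocate the "middle path" BD table and dummy buffer
 * used by control-type PDUs that do not carry SCSI data.
 */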
static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
{
        int rc = 0;
        struct iscsi_bd *mp_bdt;
        u64 addr;

        hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                            &hba->mp_bd_dma, GFP_KERNEL);
        if (!hba->mp_bd_tbl) {
                printk(KERN_ERR "unable to allocate Middle Path BDT\n");
                rc = -1;
                goto out;
        }

        hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                               &hba->dummy_buf_dma, GFP_KERNEL);
        if (!hba->dummy_buffer) {
                printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                  hba->mp_bd_tbl, hba->mp_bd_dma);
                hba->mp_bd_tbl = NULL;
                rc = -1;
                goto out;
        }

        mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
        addr = (unsigned long) hba->dummy_buf_dma;
        mp_bdt->buffer_addr_lo = addr & 0xffffffff;
        mp_bdt->buffer_addr_hi = addr >> 32;
        mp_bdt->buffer_length = PAGE_SIZE;
        mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
                        ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
        return rc;
}

static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
        if (hba->mp_bd_tbl) {
                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                  hba->mp_bd_tbl, hba->mp_bd_dma);
                hba->mp_bd_tbl = NULL;
        }
        if (hba->dummy_buffer) {
                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                  hba->dummy_buffer, hba->dummy_buf_dma);
                hba->dummy_buffer = NULL;
        }
        return;
}

/* bnx2i_drop_session - notify libiscsi that this session has failed */
void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
{
        iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
                                     struct bnx2i_endpoint *ep)
{
        write_lock_bh(&hba->ep_rdwr_lock);
        list_add_tail(&ep->link, &hba->ep_destroy_list);
        write_unlock_bh(&hba->ep_rdwr_lock);
        return 0;
}

static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
                                     struct bnx2i_endpoint *ep)
{
        write_lock_bh(&hba->ep_rdwr_lock);
        list_del_init(&ep->link);
        write_unlock_bh(&hba->ep_rdwr_lock);

        return 0;
}

static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
                                  struct bnx2i_endpoint *ep)
{
        write_lock_bh(&hba->ep_rdwr_lock);
        list_add_tail(&ep->link, &hba->ep_ofld_list);
        write_unlock_bh(&hba->ep_rdwr_lock);
        return 0;
}

static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
                                  struct bnx2i_endpoint *ep)
{
        write_lock_bh(&hba->ep_rdwr_lock);
        list_del_init(&ep->link);
        write_unlock_bh(&hba->ep_rdwr_lock);
        return 0;
}

/*
 * bnx2i_find_ep_in_ofld_list - walk the offload-pending list and return the
 * endpoint matching @iscsi_cid, or NULL if none is found.
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
        struct list_head *list;
        struct list_head *tmp;
        struct bnx2i_endpoint *ep = NULL;

        read_lock_bh(&hba->ep_rdwr_lock);
        list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
                ep = (struct bnx2i_endpoint *)list;

                if (ep->ep_iscsi_cid == iscsi_cid)
                        break;
                ep = NULL;
        }
        read_unlock_bh(&hba->ep_rdwr_lock);

        if (!ep)
                printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
        return ep;
}

/*
 * bnx2i_find_ep_in_destroy_list - walk the destroy-pending list and return the
 * endpoint matching @iscsi_cid, or NULL if none is found.
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
        struct list_head *list;
        struct list_head *tmp;
        struct bnx2i_endpoint *ep = NULL;

        read_lock_bh(&hba->ep_rdwr_lock);
        list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
                ep = (struct bnx2i_endpoint *)list;

                if (ep->ep_iscsi_cid == iscsi_cid)
                        break;
                ep = NULL;
        }
        read_unlock_bh(&hba->ep_rdwr_lock);

        if (!ep)
                printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);

        return ep;
}

static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
                                     struct bnx2i_endpoint *ep)
{
        write_lock_bh(&hba->ep_rdwr_lock);
        list_add_tail(&ep->link, &hba->ep_active_list);
        write_unlock_bh(&hba->ep_rdwr_lock);
}

static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba,
                                     struct bnx2i_endpoint *ep)
{
        write_lock_bh(&hba->ep_rdwr_lock);
        list_del_init(&ep->link);
        write_unlock_bh(&hba->ep_rdwr_lock);
}

/*
 * bnx2i_setup_host_queue_size - set the SCSI host queue depth based on the
 * type of NetXtreme II device behind this adapter.
 */
static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
                                        struct Scsi_Host *shost)
{
        if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
                shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
        else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
                shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
        else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
                shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
        else
                shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
}

/*
 * bnx2i_alloc_hba - allocate and initialize the Scsi_Host/adapter structure
 * for a cnic device: register mapping, middle-path BD table, free CID queue
 * and per-device queue sizes.
 */
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
{
        struct Scsi_Host *shost;
        struct bnx2i_hba *hba;

        shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
        if (!shost)
                return NULL;
        shost->dma_boundary = cnic->pcidev->dma_mask;
        shost->transportt = bnx2i_scsi_xport_template;
        shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
        shost->max_channel = 0;
        shost->max_lun = 512;
        shost->max_cmd_len = 16;

        hba = iscsi_host_priv(shost);
        hba->shost = shost;
        hba->netdev = cnic->netdev;
        /* Get PCI related information and update hba struct members */
        hba->pcidev = cnic->pcidev;
        pci_dev_get(hba->pcidev);
        hba->pci_did = hba->pcidev->device;
        hba->pci_vid = hba->pcidev->vendor;
        hba->pci_sdid = hba->pcidev->subsystem_device;
        hba->pci_svid = hba->pcidev->subsystem_vendor;
        hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
        hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);

        bnx2i_identify_device(hba);
        bnx2i_setup_host_queue_size(hba, shost);

        if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
                hba->regview = ioremap_nocache(hba->netdev->base_addr,
                                               BNX2_MQ_CONFIG2);
                if (!hba->regview)
                        goto ioreg_map_err;
        } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
                hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
                if (!hba->regview)
                        goto ioreg_map_err;
        }

        if (bnx2i_setup_mp_bdt(hba))
                goto mp_bdt_mem_err;

        INIT_LIST_HEAD(&hba->ep_ofld_list);
        INIT_LIST_HEAD(&hba->ep_active_list);
        INIT_LIST_HEAD(&hba->ep_destroy_list);
        rwlock_init(&hba->ep_rdwr_lock);

        hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;

        hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;

        if (bnx2i_setup_free_cid_que(hba))
                goto cid_que_err;

        /* honor the sq_size override when it is within the per-device limit */
        if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
                if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
                        hba->max_sqes = sq_size;
                else
                        hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
        } else {
                if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
                        hba->max_sqes = sq_size;
                else
                        hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
        }

        hba->max_rqes = rq_size;
        hba->max_cqes = hba->max_sqes + rq_size;
        if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
                if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
                        hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
        } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
                hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;

        hba->num_ccell = hba->max_sqes / 2;

        spin_lock_init(&hba->lock);
        mutex_init(&hba->net_dev_lock);
        init_waitqueue_head(&hba->eh_wait);
        if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
                hba->hba_shutdown_tmo = 30 * HZ;
                hba->conn_teardown_tmo = 20 * HZ;
                hba->conn_ctx_destroy_tmo = 6 * HZ;
        } else {
                hba->hba_shutdown_tmo = 20 * HZ;
                hba->conn_teardown_tmo = 10 * HZ;
                hba->conn_ctx_destroy_tmo = 2 * HZ;
        }

        if (iscsi_host_add(shost, &hba->pcidev->dev))
                goto free_dump_mem;
        return hba;

free_dump_mem:
        bnx2i_release_free_cid_que(hba);
cid_que_err:
        bnx2i_free_mp_bdt(hba);
mp_bdt_mem_err:
        if (hba->regview) {
                iounmap(hba->regview);
                hba->regview = NULL;
        }
ioreg_map_err:
        pci_dev_put(hba->pcidev);
        scsi_host_put(shost);
        return NULL;
}

/*
 * bnx2i_free_hba - release all resources acquired in bnx2i_alloc_hba() and
 * free the Scsi_Host.
 */
void bnx2i_free_hba(struct bnx2i_hba *hba)
{
        struct Scsi_Host *shost = hba->shost;

        iscsi_host_remove(shost);
        INIT_LIST_HEAD(&hba->ep_ofld_list);
        INIT_LIST_HEAD(&hba->ep_active_list);
        INIT_LIST_HEAD(&hba->ep_destroy_list);
        pci_dev_put(hba->pcidev);

        if (hba->regview) {
                iounmap(hba->regview);
                hba->regview = NULL;
        }
        bnx2i_free_mp_bdt(hba);
        bnx2i_release_free_cid_que(hba);
        iscsi_host_free(shost);
}

/*
 * bnx2i_conn_free_login_resources - free the DMA buffers and BD tables used
 * for login and other generic PDU exchange on this connection.
 */
static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
                                            struct bnx2i_conn *bnx2i_conn)
{
        if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                  bnx2i_conn->gen_pdu.resp_bd_tbl,
                                  bnx2i_conn->gen_pdu.resp_bd_dma);
                bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
        }

        if (bnx2i_conn->gen_pdu.req_bd_tbl) {
                dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                  bnx2i_conn->gen_pdu.req_bd_tbl,
                                  bnx2i_conn->gen_pdu.req_bd_dma);
                bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
        }

        if (bnx2i_conn->gen_pdu.resp_buf) {
                dma_free_coherent(&hba->pcidev->dev,
                                  ISCSI_DEF_MAX_RECV_SEG_LEN,
                                  bnx2i_conn->gen_pdu.resp_buf,
                                  bnx2i_conn->gen_pdu.resp_dma_addr);
                bnx2i_conn->gen_pdu.resp_buf = NULL;
        }

        if (bnx2i_conn->gen_pdu.req_buf) {
                dma_free_coherent(&hba->pcidev->dev,
                                  ISCSI_DEF_MAX_RECV_SEG_LEN,
                                  bnx2i_conn->gen_pdu.req_buf,
                                  bnx2i_conn->gen_pdu.req_dma_addr);
                bnx2i_conn->gen_pdu.req_buf = NULL;
        }
}

/*
 * bnx2i_conn_alloc_login_resources - allocate DMA buffers and BD tables for
 * generic (login/nop/text/logout) PDU exchange on this connection.
 */
static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
                                            struct bnx2i_conn *bnx2i_conn)
{
        /* Allocate memory for login request/response buffers */
        bnx2i_conn->gen_pdu.req_buf =
                dma_alloc_coherent(&hba->pcidev->dev,
                                   ISCSI_DEF_MAX_RECV_SEG_LEN,
                                   &bnx2i_conn->gen_pdu.req_dma_addr,
                                   GFP_KERNEL);
        if (bnx2i_conn->gen_pdu.req_buf == NULL)
                goto login_req_buf_failure;

        bnx2i_conn->gen_pdu.req_buf_size = 0;
        bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;

        bnx2i_conn->gen_pdu.resp_buf =
                dma_alloc_coherent(&hba->pcidev->dev,
                                   ISCSI_DEF_MAX_RECV_SEG_LEN,
                                   &bnx2i_conn->gen_pdu.resp_dma_addr,
                                   GFP_KERNEL);
        if (bnx2i_conn->gen_pdu.resp_buf == NULL)
                goto login_resp_buf_failure;

        bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
        bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;

        bnx2i_conn->gen_pdu.req_bd_tbl =
                dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
        if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
                goto login_req_bd_tbl_failure;

        bnx2i_conn->gen_pdu.resp_bd_tbl =
                dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
                                   &bnx2i_conn->gen_pdu.resp_bd_dma,
                                   GFP_KERNEL);
        if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
                goto login_resp_bd_tbl_failure;

        return 0;

login_resp_bd_tbl_failure:
        dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
                          bnx2i_conn->gen_pdu.req_bd_tbl,
                          bnx2i_conn->gen_pdu.req_bd_dma);
        bnx2i_conn->gen_pdu.req_bd_tbl = NULL;

login_req_bd_tbl_failure:
        dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
                          bnx2i_conn->gen_pdu.resp_buf,
                          bnx2i_conn->gen_pdu.resp_dma_addr);
        bnx2i_conn->gen_pdu.resp_buf = NULL;
login_resp_buf_failure:
        dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
                          bnx2i_conn->gen_pdu.req_buf,
                          bnx2i_conn->gen_pdu.req_dma_addr);
        bnx2i_conn->gen_pdu.req_buf = NULL;
login_req_buf_failure:
        iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
                          "login resource alloc failed!!\n");
        return -ENOMEM;
}

/*
 * bnx2i_iscsi_prep_generic_pdu_bd - set up the single-entry BD chains that
 * describe the generic PDU request and response buffers.
 */
static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
{
        struct iscsi_bd *bd_tbl;

        bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;

        bd_tbl->buffer_addr_hi =
                (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
        bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
        bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
                                bnx2i_conn->gen_pdu.req_buf;
        bd_tbl->reserved0 = 0;
        bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
                        ISCSI_BD_FIRST_IN_BD_CHAIN;

        bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
        bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
        bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
        bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
        bd_tbl->reserved0 = 0;
        bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
                        ISCSI_BD_FIRST_IN_BD_CHAIN;
}

/*
 * bnx2i_iscsi_send_generic_request - post a non-SCSI request (login, nop-out,
 * logout, TMF or text) to the hardware send queue.
 */
static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
{
        struct bnx2i_cmd *cmd = task->dd_data;
        struct bnx2i_conn *bnx2i_conn = cmd->conn;
        int rc = 0;
        char *buf;
        int data_len;

        bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
        switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
        case ISCSI_OP_LOGIN:
                bnx2i_send_iscsi_login(bnx2i_conn, task);
                break;
        case ISCSI_OP_NOOP_OUT:
                data_len = bnx2i_conn->gen_pdu.req_buf_size;
                buf = bnx2i_conn->gen_pdu.req_buf;
                if (data_len)
                        rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
                                                     buf, data_len, 1);
                else
                        rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
                                                     NULL, 0, 1);
                break;
        case ISCSI_OP_LOGOUT:
                rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
                break;
        case ISCSI_OP_TEXT:
                rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
                break;
        default:
                iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
                                  "send_gen: unsupported op 0x%x\n",
                                  task->hdr->opcode);
        }
        return rc;
}

/*
 * bnx2i_cpy_scsi_cdb - copy the LUN and CDB from the scsi_cmnd into the
 * firmware request, converting the CDB to big-endian 32-bit words.
 */
static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
{
        u32 dword;
        int lpcnt;
        u8 *srcp;
        u32 *dstp;
        u32 scsi_lun[2];

        int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
        cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
        cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);

        lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
        srcp = (u8 *) sc->cmnd;
        dstp = (u32 *) cmd->req.cdb;
        while (lpcnt--) {
                memcpy(&dword, (const void *) srcp, 4);
                *dstp = cpu_to_be32(dword);
                srcp += 4;
                dstp++;
        }
        if (sc->cmd_len & 0x3) {
                dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
                *dstp = cpu_to_be32(dword);
        }
}

static void bnx2i_cleanup_task(struct iscsi_task *task)
{
        struct iscsi_conn *conn = task->conn;
        struct bnx2i_conn *bnx2i_conn = conn->dd_data;
        struct bnx2i_hba *hba = bnx2i_conn->hba;

        /*
         * mgmt task or command that was never sent to the chip needs no
         * hardware cleanup.
         */
        if (!task->sc || task->state == ISCSI_TASK_PENDING)
                return;
        /*
         * ask the chip to clean up the task context so the DMA buffers
         * can be reclaimed.
         */
        if (task->state == ISCSI_TASK_ABRT_TMF) {
                bnx2i_send_cmd_cleanup_req(hba, task->dd_data);

                spin_unlock_bh(&conn->session->lock);
                wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
                                msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
                spin_lock_bh(&conn->session->lock);
        }
        bnx2i_iscsi_unmap_sg_list(task->dd_data);
}

/*
 * bnx2i_mtask_xmit - transmit a management task, copying any PDU payload into
 * the connection's generic request buffer first.
 */
static int
bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
        struct bnx2i_conn *bnx2i_conn = conn->dd_data;
        struct bnx2i_cmd *cmd = task->dd_data;

        memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);

        bnx2i_setup_cmd_wqe_template(cmd);
        bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
        if (task->data_count) {
                memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
                       task->data_count);
                bnx2i_conn->gen_pdu.req_wr_ptr =
                        bnx2i_conn->gen_pdu.req_buf + task->data_count;
        }
        cmd->conn = conn->dd_data;
        cmd->scsi_cmd = NULL;
        return bnx2i_iscsi_send_generic_request(task);
}

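/*
 * bnx2i_task_xmit - transmit an iSCSI task. SCSI commands get their SG list
 * mapped into a BD table; tasks without a scsi_cmnd are handed off to
 * bnx2i_mtask_xmit(). Fails with -ENOMEM when the send queue is full.
 */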
static int bnx2i_task_xmit(struct iscsi_task *task)
{
        struct iscsi_conn *conn = task->conn;
        struct iscsi_session *session = conn->session;
        struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
        struct bnx2i_hba *hba = iscsi_host_priv(shost);
        struct bnx2i_conn *bnx2i_conn = conn->dd_data;
        struct scsi_cmnd *sc = task->sc;
        struct bnx2i_cmd *cmd = task->dd_data;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;

        if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
            hba->max_sqes)
                return -ENOMEM;

        /* if there is no scsi_cmnd this must be a mgmt task */
        if (!sc)
                return bnx2i_mtask_xmit(conn, task);

        bnx2i_setup_cmd_wqe_template(cmd);
        cmd->req.op_code = ISCSI_OP_SCSI_CMD;
        cmd->conn = bnx2i_conn;
        cmd->scsi_cmd = sc;
        cmd->req.total_data_transfer_length = scsi_bufflen(sc);
        cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);

        bnx2i_iscsi_map_sg_list(cmd);
        bnx2i_cpy_scsi_cdb(sc, cmd);

        cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
        if (sc->sc_data_direction == DMA_TO_DEVICE) {
                cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
                cmd->req.itt = task->itt |
                        (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
                bnx2i_setup_write_cmd_bd_info(task);
        } else {
                if (scsi_bufflen(sc))
                        cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
                cmd->req.itt = task->itt |
                        (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
        }

        cmd->req.num_bds = cmd->io_tbl.bd_valid;
        if (!cmd->io_tbl.bd_valid) {
                cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
                cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
                cmd->req.num_bds = 1;
        }

        bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
        return 0;
}

/*
 * bnx2i_session_create - create a new iSCSI session on the endpoint's host,
 * clamping cmds_max to what the adapter's send queue can support.
 */
static struct iscsi_cls_session *
bnx2i_session_create(struct iscsi_endpoint *ep,
                     uint16_t cmds_max, uint16_t qdepth,
                     uint32_t initial_cmdsn)
{
        struct Scsi_Host *shost;
        struct iscsi_cls_session *cls_session;
        struct bnx2i_hba *hba;
        struct bnx2i_endpoint *bnx2i_ep;

        if (!ep) {
                printk(KERN_ERR "bnx2i: missing ep.\n");
                return NULL;
        }

        bnx2i_ep = ep->dd_data;
        shost = bnx2i_ep->hba->shost;
        hba = iscsi_host_priv(shost);
        if (bnx2i_adapter_ready(hba))
                return NULL;

        /*
         * user can override the hw limit as long as the value stays within
         * the min/max bounds.
         */
        if (cmds_max > hba->max_sqes)
                cmds_max = hba->max_sqes;
        else if (cmds_max < BNX2I_SQ_WQES_MIN)
                cmds_max = BNX2I_SQ_WQES_MIN;

        cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
                                          cmds_max, 0, sizeof(struct bnx2i_cmd),
                                          initial_cmdsn, ISCSI_MAX_TARGET);
        if (!cls_session)
                return NULL;

        if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
                goto session_teardown;
        return cls_session;

session_teardown:
        iscsi_session_teardown(cls_session);
        return NULL;
}

static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
{
        struct iscsi_session *session = cls_session->dd_data;
        struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
        struct bnx2i_hba *hba = iscsi_host_priv(shost);

        bnx2i_destroy_cmd_pool(hba, session);
        iscsi_session_teardown(cls_session);
}

/*
 * bnx2i_conn_create - create an iSCSI connection object and allocate its
 * login/generic PDU resources.
 */
static struct iscsi_cls_conn *
bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
        struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
        struct bnx2i_hba *hba = iscsi_host_priv(shost);
        struct bnx2i_conn *bnx2i_conn;
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_conn *conn;

        cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn), cid);
        if (!cls_conn)
                return NULL;
        conn = cls_conn->dd_data;

        bnx2i_conn = conn->dd_data;
        bnx2i_conn->cls_conn = cls_conn;
        bnx2i_conn->hba = hba;

        atomic_set(&bnx2i_conn->work_cnt, 0);

        /* 'ep' ptr will be assigned in bind() call */
        bnx2i_conn->ep = NULL;
        init_completion(&bnx2i_conn->cmd_cleanup_cmpl);

        if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
                iscsi_conn_printk(KERN_ALERT, conn,
                                  "conn_new: login resc alloc failed!!\n");
                goto free_conn;
        }

        return cls_conn;

free_conn:
        iscsi_conn_teardown(cls_conn);
        return NULL;
}

/*
 * bnx2i_conn_bind - bind the iSCSI connection to the offloaded TCP endpoint
 * identified by @transport_fd and register it in the conn<->cid table.
 */
static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
                           struct iscsi_cls_conn *cls_conn,
                           uint64_t transport_fd, int is_leading)
{
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct bnx2i_conn *bnx2i_conn = conn->dd_data;
        struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
        struct bnx2i_hba *hba = iscsi_host_priv(shost);
        struct bnx2i_endpoint *bnx2i_ep;
        struct iscsi_endpoint *ep;
        int ret_code;

        ep = iscsi_lookup_endpoint(transport_fd);
        if (!ep)
                return -EINVAL;

        /* the adapter must be up and the link alive before binding a conn */
        if (bnx2i_adapter_ready(hba))
                return -EIO;

        bnx2i_ep = ep->dd_data;
        if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
            (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
                /* peer disconnected via FIN or RST */
                return -EINVAL;

        if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
                return -EINVAL;

        if (bnx2i_ep->hba != hba) {
                /* the endpoint was offloaded on a different adapter */
                iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
                                  "conn bind, ep=0x%p (%s) does not",
                                  bnx2i_ep, bnx2i_ep->hba->netdev->name);
                iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
                                  "belong to hba (%s)\n",
                                  hba->netdev->name);
                return -EEXIST;
        }
        bnx2i_ep->conn = bnx2i_conn;
        bnx2i_conn->ep = bnx2i_ep;
        bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
        bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;

        ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
                                                bnx2i_ep->ep_iscsi_cid);

        /* 57710 devices need the RQ index to be posted explicitly during
         * connection setup.
         */
        if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
                bnx2i_put_rq_buf(bnx2i_conn, 0);

        bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
        return ret_code;
}

/*
 * bnx2i_conn_destroy - free the connection's login resources, flush any
 * per-CPU work still queued for it, and tear the connection down.
 */
static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct bnx2i_conn *bnx2i_conn = conn->dd_data;
        struct Scsi_Host *shost;
        struct bnx2i_hba *hba;
        struct bnx2i_work *work, *tmp;
        unsigned cpu = 0;
        struct bnx2i_percpu_s *p;

        shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
        hba = iscsi_host_priv(shost);

        bnx2i_conn_free_login_resources(hba, bnx2i_conn);

        if (atomic_read(&bnx2i_conn->work_cnt)) {
                for_each_online_cpu(cpu) {
                        p = &per_cpu(bnx2i_percpu, cpu);
                        spin_lock_bh(&p->p_work_lock);
                        list_for_each_entry_safe(work, tmp,
                                                 &p->work_list, list) {
                                if (work->session == conn->session &&
                                    work->bnx2i_conn == bnx2i_conn) {
                                        list_del_init(&work->list);
                                        kfree(work);
                                        if (!atomic_dec_and_test(
                                                        &bnx2i_conn->work_cnt))
                                                break;
                                }
                        }
                        spin_unlock_bh(&p->p_work_lock);
                }
        }

        iscsi_conn_teardown(cls_conn);
}

/*
 * bnx2i_ep_get_param - return endpoint parameters (peer port/address) read
 * from the underlying cnic socket.
 */
static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
                              enum iscsi_param param, char *buf)
{
        struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
        struct bnx2i_hba *hba = bnx2i_ep->hba;
        int len = -ENOTCONN;

        if (!hba)
                return -ENOTCONN;

        switch (param) {
        case ISCSI_PARAM_CONN_PORT:
                mutex_lock(&hba->net_dev_lock);
                if (bnx2i_ep->cm_sk)
                        len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
                mutex_unlock(&hba->net_dev_lock);
                break;
        case ISCSI_PARAM_CONN_ADDRESS:
                mutex_lock(&hba->net_dev_lock);
                if (bnx2i_ep->cm_sk)
                        len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
                mutex_unlock(&hba->net_dev_lock);
                break;
        default:
                return -ENOSYS;
        }

        return len;
}

static int bnx2i_host_get_param(struct Scsi_Host *shost,
                                enum iscsi_host_param param, char *buf)
{
        struct bnx2i_hba *hba = iscsi_host_priv(shost);
        int len = 0;

        switch (param) {
        case ISCSI_HOST_PARAM_HWADDRESS:
                len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
                break;
        case ISCSI_HOST_PARAM_NETDEV_NAME:
                len = sprintf(buf, "%s\n", hba->netdev->name);
                break;
        case ISCSI_HOST_PARAM_IPADDRESS: {
                struct list_head *active_list = &hba->ep_active_list;

                read_lock_bh(&hba->ep_rdwr_lock);
                if (!list_empty(&hba->ep_active_list)) {
                        struct bnx2i_endpoint *bnx2i_ep;
                        struct cnic_sock *csk;

                        bnx2i_ep = list_first_entry(active_list,
                                                    struct bnx2i_endpoint,
                                                    link);
                        csk = bnx2i_ep->cm_sk;
                        if (test_bit(SK_F_IPV6, &csk->flags))
                                len = sprintf(buf, "%pI6\n", csk->src_ip);
                        else
                                len = sprintf(buf, "%pI4\n", csk->src_ip);
                }
                read_unlock_bh(&hba->ep_rdwr_lock);
                break;
        }
        default:
                return iscsi_host_get_param(shost, param, buf);
        }
        return len;
}

/*
 * bnx2i_conn_start - send the connection-update request to the chip, wait
 * (bounded by a 1 second timer) for the firmware to acknowledge it, and then
 * let libiscsi start the connection.
 */
static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
{
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct bnx2i_conn *bnx2i_conn = conn->dd_data;

        bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
        bnx2i_update_iscsi_conn(conn);

        /*
         * this should normally not sleep for a long time so it should
         * not disrupt the caller.
         */
        bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
        bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
        bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
        add_timer(&bnx2i_conn->ep->ofld_timer);
        /* update iSCSI context for this conn, wait for CNIC to complete */
        wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
                        bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);

        if (signal_pending(current))
                flush_signals(current);
        del_timer_sync(&bnx2i_conn->ep->ofld_timer);

        iscsi_conn_start(cls_conn);
        return 0;
}

/* bnx2i_conn_get_stats - return a snapshot of per-connection iSCSI statistics */
static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
                                 struct iscsi_stats *stats)
{
        struct iscsi_conn *conn = cls_conn->dd_data;

        stats->txdata_octets = conn->txdata_octets;
        stats->rxdata_octets = conn->rxdata_octets;
        stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
        stats->dataout_pdus = conn->dataout_pdus_cnt;
        stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
        stats->datain_pdus = conn->datain_pdus_cnt;
        stats->r2t_pdus = conn->r2t_pdus_cnt;
        stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
        stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
        stats->digest_err = 0;
        stats->timeout_err = 0;
        strcpy(stats->custom[0].desc, "eh_abort_cnt");
        stats->custom[0].value = conn->eh_abort_cnt;
        stats->custom_length = 1;
}

/*
 * bnx2i_check_route - find the NetXtreme II adapter that can reach @dst_addr
 * and verify it is ready and that its MTU is within the supported range.
 */
static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
{
        struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
        struct bnx2i_hba *hba;
        struct cnic_dev *cnic = NULL;

        hba = get_adapter_list_head();
        if (hba && hba->cnic)
                cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
        if (!cnic) {
                printk(KERN_ALERT "bnx2i: no route, "
                       "can't connect using cnic\n");
                goto no_nx2_route;
        }
        hba = bnx2i_find_hba_for_cnic(cnic);
        if (!hba)
                goto no_nx2_route;

        if (bnx2i_adapter_ready(hba)) {
                printk(KERN_ALERT "bnx2i: check route, hba not found\n");
                goto no_nx2_route;
        }
        if (hba->netdev->mtu > hba->mtu_supported) {
                printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
                       hba->netdev->name, hba->netdev->mtu);
                printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
                       hba->mtu_supported);
                goto no_nx2_route;
        }
        return hba;
no_nx2_route:
        return NULL;
}

/*
 * bnx2i_tear_down_conn - destroy the cnic socket and send a connection-destroy
 * request to the firmware, waiting (with a timeout) for its completion.
 */
static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
                                struct bnx2i_endpoint *ep)
{
        if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
                hba->cnic->cm_destroy(ep->cm_sk);

        if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
            ep->state == EP_STATE_DISCONN_TIMEDOUT) {
                if (ep->conn && ep->conn->cls_conn &&
                    ep->conn->cls_conn->dd_data) {
                        struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;

                        /* must suspend all rx queue activity for this ep */
                        set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
                }
                /* the disconnect request to the chip timed out */
                printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
                       "please submit GRC Dump, NW/PCIe trace, "
                       "driver msgs to developers for analysis\n",
                       hba->netdev->name);
        }

        ep->state = EP_STATE_CLEANUP_START;
        init_timer(&ep->ofld_timer);
        ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
        ep->ofld_timer.function = bnx2i_ep_ofld_timer;
        ep->ofld_timer.data = (unsigned long) ep;
        add_timer(&ep->ofld_timer);

        bnx2i_ep_destroy_list_add(hba, ep);

        /* destroy iSCSI context, wait for it to complete */
        if (bnx2i_send_conn_destroy(hba, ep))
                ep->state = EP_STATE_CLEANUP_CMPL;

        wait_event_interruptible(ep->ofld_wait,
                                 (ep->state != EP_STATE_CLEANUP_START));

        if (signal_pending(current))
                flush_signals(current);
        del_timer_sync(&ep->ofld_timer);

        bnx2i_ep_destroy_list_del(hba, ep);

        if (ep->state != EP_STATE_CLEANUP_CMPL)
                /* cleanup did not complete within the timeout */
                printk(KERN_ALERT "bnx2i - conn destroy failed\n");

        return 0;
}

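/*
 * bnx2i_ep_connect - allocate an endpoint and an iSCSI CID, offload the
 * connection context to the chip, and then initiate the TCP connection to
 * @dst_addr through the cnic device. Returns an ERR_PTR on failure.
 */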
static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
                                               struct sockaddr *dst_addr,
                                               int non_blocking)
{
        u32 iscsi_cid = BNX2I_CID_RESERVED;
        struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
        struct sockaddr_in6 *desti6;
        struct bnx2i_endpoint *bnx2i_ep;
        struct bnx2i_hba *hba;
        struct cnic_dev *cnic;
        struct cnic_sockaddr saddr;
        struct iscsi_endpoint *ep;
        int rc = 0;

        if (shost) {
                /* driver is given a scsi host to work with */
                hba = iscsi_host_priv(shost);
        } else
                /*
                 * check if the given destination can be reached through a
                 * NetXtreme II device
                 */
                hba = bnx2i_check_route(dst_addr);

        if (!hba) {
                rc = -EINVAL;
                goto nohba;
        }
        mutex_lock(&hba->net_dev_lock);

        if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
                rc = -EPERM;
                goto check_busy;
        }
        cnic = hba->cnic;
        ep = bnx2i_alloc_ep(hba);
        if (!ep) {
                rc = -ENOMEM;
                goto check_busy;
        }
        bnx2i_ep = ep->dd_data;

        atomic_set(&bnx2i_ep->num_active_cmds, 0);
        iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
        if (iscsi_cid == -1) {
                printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
                       "iscsi cid\n", hba->netdev->name);
                rc = -ENOMEM;
                bnx2i_free_ep(ep);
                goto check_busy;
        }
        bnx2i_ep->hba_age = hba->age;

        rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
        if (rc != 0) {
                printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
                       "\n", hba->netdev->name);
                rc = -ENOMEM;
                goto qp_resc_err;
        }

        bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
        bnx2i_ep->state = EP_STATE_OFLD_START;
        bnx2i_ep_ofld_list_add(hba, bnx2i_ep);

        init_timer(&bnx2i_ep->ofld_timer);
        bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
        bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
        bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
        add_timer(&bnx2i_ep->ofld_timer);

        if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
                if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
                        printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
                               hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
                        rc = -EBUSY;
                } else
                        rc = -ENOSPC;
                printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
                       "\n", hba->netdev->name);
                bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
                goto conn_failed;
        }

        /* wait for the chip to set up the conn context and return a cid */
        wait_event_interruptible(bnx2i_ep->ofld_wait,
                                 bnx2i_ep->state != EP_STATE_OFLD_START);

        if (signal_pending(current))
                flush_signals(current);
        del_timer_sync(&bnx2i_ep->ofld_timer);

        bnx2i_ep_ofld_list_del(hba, bnx2i_ep);

        if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
                if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
                        printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
                               hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
                        rc = -EBUSY;
                } else
                        rc = -ENOSPC;
                goto conn_failed;
        }

        rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
                             iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
        if (rc) {
                rc = -EINVAL;
                /* need to tear down the partially offloaded connection */
                goto release_ep;
        }

        bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
        bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
        clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);

        memset(&saddr, 0, sizeof(saddr));
        if (dst_addr->sa_family == AF_INET) {
                desti = (struct sockaddr_in *) dst_addr;
                saddr.remote.v4 = *desti;
                saddr.local.v4.sin_family = desti->sin_family;
        } else if (dst_addr->sa_family == AF_INET6) {
                desti6 = (struct sockaddr_in6 *) dst_addr;
                saddr.remote.v6 = *desti6;
                saddr.local.v6.sin6_family = desti6->sin6_family;
        }

        bnx2i_ep->timestamp = jiffies;
        bnx2i_ep->state = EP_STATE_CONNECT_START;
        if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
                rc = -EINVAL;
                goto conn_failed;
        } else
                rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
        if (rc)
                goto release_ep;

        bnx2i_ep_active_list_add(hba, bnx2i_ep);

        if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
                goto del_active_ep;

        mutex_unlock(&hba->net_dev_lock);
        return ep;

del_active_ep:
        bnx2i_ep_active_list_del(hba, bnx2i_ep);
release_ep:
        if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
                mutex_unlock(&hba->net_dev_lock);
                return ERR_PTR(rc);
        }
conn_failed:
        bnx2i_free_qp_resc(hba, bnx2i_ep);
qp_resc_err:
        bnx2i_free_ep(ep);
check_busy:
        mutex_unlock(&hba->net_dev_lock);
nohba:
        return ERR_PTR(rc);
}

/*
 * bnx2i_ep_poll - poll for TCP connection establishment; returns 1 when the
 * connection is up, 0 on timeout and a negative value on failure.
 */
static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
        struct bnx2i_endpoint *bnx2i_ep;
        int rc = 0;

        bnx2i_ep = ep->dd_data;
        if ((bnx2i_ep->state == EP_STATE_IDLE) ||
            (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
            (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
                return -1;
        if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
                return 1;

        rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
                                              ((bnx2i_ep->state ==
                                                EP_STATE_OFLD_FAILED) ||
                                               (bnx2i_ep->state ==
                                                EP_STATE_CONNECT_FAILED) ||
                                               (bnx2i_ep->state ==
                                                EP_STATE_CONNECT_COMPL)),
                                              msecs_to_jiffies(timeout_ms));
        if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
                rc = -1;

        if (rc > 0)
                return 1;
        else if (!rc)
                return 0;       /* timed out */
        else
                return rc;
}

/*
 * bnx2i_ep_tcp_conn_active - return 1 if the endpoint's TCP connection is
 * still active and a graceful teardown should be attempted, 0 otherwise.
 */
static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
{
        int ret;
        int cnic_dev_10g = 0;

        if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
                cnic_dev_10g = 1;

        switch (bnx2i_ep->state) {
        case EP_STATE_CLEANUP_FAILED:
        case EP_STATE_OFLD_FAILED:
        case EP_STATE_DISCONN_TIMEDOUT:
                ret = 0;
                break;
        case EP_STATE_CONNECT_START:
        case EP_STATE_CONNECT_FAILED:
        case EP_STATE_CONNECT_COMPL:
        case EP_STATE_ULP_UPDATE_START:
        case EP_STATE_ULP_UPDATE_COMPL:
        case EP_STATE_TCP_FIN_RCVD:
        case EP_STATE_LOGOUT_SENT:
        case EP_STATE_LOGOUT_RESP_RCVD:
        case EP_STATE_ULP_UPDATE_FAILED:
                ret = 1;
                break;
        case EP_STATE_TCP_RST_RCVD:
                if (cnic_dev_10g)
                        ret = 0;
                else
                        ret = 1;
                break;
        default:
                ret = 0;
        }

        return ret;
}

/*
 * bnx2i_hw_ep_disconnect - gracefully or forcefully close the offloaded TCP
 * connection (cm_close/cm_abort) and then destroy the firmware context.
 */
int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
{
        struct bnx2i_hba *hba = bnx2i_ep->hba;
        struct cnic_dev *cnic;
        struct iscsi_session *session = NULL;
        struct iscsi_conn *conn = NULL;
        int ret = 0;
        int close = 0;
        int close_ret = 0;

        if (!hba)
                return 0;

        cnic = hba->cnic;
        if (!cnic)
                return 0;

        if (bnx2i_ep->state == EP_STATE_IDLE ||
            bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
                return 0;

        if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
                goto destroy_conn;

        if (bnx2i_ep->conn) {
                conn = bnx2i_ep->conn->cls_conn->dd_data;
                session = conn->session;
        }

        init_timer(&bnx2i_ep->ofld_timer);
        bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies;
        bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
        bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
        add_timer(&bnx2i_ep->ofld_timer);

        if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
                goto out;

        /* a graceful close is only attempted once the logout exchange has
         * completed; otherwise the connection is aborted.
         */
        if (session) {
                spin_lock_bh(&session->lock);
                if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
                        if (session->state == ISCSI_STATE_LOGGING_OUT) {
                                if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
                                        /* logout sent, but no response yet */
                                        printk(KERN_ALERT "bnx2i (%s): WARNING"
                                               " logout response was not "
                                               "received!\n",
                                               bnx2i_ep->hba->netdev->name);
                                } else if (bnx2i_ep->state ==
                                           EP_STATE_LOGOUT_RESP_RCVD)
                                        close = 1;
                        }
                } else
                        close = 1;

                spin_unlock_bh(&session->lock);
        }

        bnx2i_ep->state = EP_STATE_DISCONN_START;

        if (close)
                close_ret = cnic->cm_close(bnx2i_ep->cm_sk);
        else
                close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);

        if (close_ret)
                printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
                       bnx2i_ep->hba->netdev->name, close, close_ret);
        else
                /* wait for the connection teardown to complete */
                wait_event_interruptible(bnx2i_ep->ofld_wait,
                                bnx2i_ep->state != EP_STATE_DISCONN_START);

        if (signal_pending(current))
                flush_signals(current);
        del_timer_sync(&bnx2i_ep->ofld_timer);

destroy_conn:
        bnx2i_ep_active_list_del(hba, bnx2i_ep);
        if (bnx2i_tear_down_conn(hba, bnx2i_ep))
                return -EINVAL;
out:
        bnx2i_ep->state = EP_STATE_IDLE;
        return ret;
}

/*
 * bnx2i_ep_disconnect - disconnect handler invoked by the iSCSI transport;
 * waits briefly for an in-progress connect, then tears the endpoint down and
 * frees its resources.
 */
static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
{
        struct bnx2i_endpoint *bnx2i_ep;
        struct bnx2i_conn *bnx2i_conn = NULL;
        struct iscsi_conn *conn = NULL;
        struct bnx2i_hba *hba;

        bnx2i_ep = ep->dd_data;

        /* wait (up to ~12s) for an in-flight TCP connect to either complete
         * or fail before attempting any connection cleanup.
         */
        while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
               !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
                msleep(250);

        if (bnx2i_ep->conn) {
                bnx2i_conn = bnx2i_ep->conn;
                conn = bnx2i_conn->cls_conn->dd_data;
                iscsi_suspend_queue(conn);
        }
        hba = bnx2i_ep->hba;

        mutex_lock(&hba->net_dev_lock);

        if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
                goto out;

        if (bnx2i_ep->state == EP_STATE_IDLE)
                goto free_resc;

        if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
            (bnx2i_ep->hba_age != hba->age)) {
                bnx2i_ep_active_list_del(hba, bnx2i_ep);
                goto free_resc;
        }

        /* offloaded connection is still active on the chip: tear it down */
        if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
                mutex_unlock(&hba->net_dev_lock);
                return;
        }
free_resc:
        bnx2i_free_qp_resc(hba, bnx2i_ep);

        if (bnx2i_conn)
                bnx2i_conn->ep = NULL;

        bnx2i_free_ep(ep);
out:
        mutex_unlock(&hba->net_dev_lock);

        wake_up_interruptible(&hba->eh_wait);
}

/*
 * bnx2i_nl_set_path - pass an ISCSI_UEVENT_PATH_UPDATE netlink message through
 * to the cnic driver, which owns path/route handling.
 */
static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
{
        struct bnx2i_hba *hba = iscsi_host_priv(shost);
        char *buf = (char *) params;
        u16 len = sizeof(*params);

        /* handled by the cnic driver */
        hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
                                     len);

        return 0;
}

static umode_t bnx2i_attr_is_visible(int param_type, int param)
{
        switch (param_type) {
        case ISCSI_HOST_PARAM:
                switch (param) {
                case ISCSI_HOST_PARAM_NETDEV_NAME:
                case ISCSI_HOST_PARAM_HWADDRESS:
                case ISCSI_HOST_PARAM_IPADDRESS:
                        return S_IRUGO;
                default:
                        return 0;
                }
        case ISCSI_PARAM:
                switch (param) {
                case ISCSI_PARAM_MAX_RECV_DLENGTH:
                case ISCSI_PARAM_MAX_XMIT_DLENGTH:
                case ISCSI_PARAM_HDRDGST_EN:
                case ISCSI_PARAM_DATADGST_EN:
                case ISCSI_PARAM_CONN_ADDRESS:
                case ISCSI_PARAM_CONN_PORT:
                case ISCSI_PARAM_EXP_STATSN:
                case ISCSI_PARAM_PERSISTENT_ADDRESS:
                case ISCSI_PARAM_PERSISTENT_PORT:
                case ISCSI_PARAM_PING_TMO:
                case ISCSI_PARAM_RECV_TMO:
                case ISCSI_PARAM_INITIAL_R2T_EN:
                case ISCSI_PARAM_MAX_R2T:
                case ISCSI_PARAM_IMM_DATA_EN:
                case ISCSI_PARAM_FIRST_BURST:
                case ISCSI_PARAM_MAX_BURST:
                case ISCSI_PARAM_PDU_INORDER_EN:
                case ISCSI_PARAM_DATASEQ_INORDER_EN:
                case ISCSI_PARAM_ERL:
                case ISCSI_PARAM_TARGET_NAME:
                case ISCSI_PARAM_TPGT:
                case ISCSI_PARAM_USERNAME:
                case ISCSI_PARAM_PASSWORD:
                case ISCSI_PARAM_USERNAME_IN:
                case ISCSI_PARAM_PASSWORD_IN:
                case ISCSI_PARAM_FAST_ABORT:
                case ISCSI_PARAM_ABORT_TMO:
                case ISCSI_PARAM_LU_RESET_TMO:
                case ISCSI_PARAM_TGT_RESET_TMO:
                case ISCSI_PARAM_IFACE_NAME:
                case ISCSI_PARAM_INITIATOR_NAME:
                        return S_IRUGO;
                default:
                        return 0;
                }
        }

        return 0;
}

/*
 * SCSI host template and iSCSI transport template used when registering with
 * the SCSI midlayer and the iSCSI transport class.
 */
static struct scsi_host_template bnx2i_host_template = {
        .module                   = THIS_MODULE,
        .name                     = "Broadcom Offload iSCSI Initiator",
        .proc_name                = "bnx2i",
        .queuecommand             = iscsi_queuecommand,
        .eh_abort_handler         = iscsi_eh_abort,
        .eh_device_reset_handler  = iscsi_eh_device_reset,
        .eh_target_reset_handler  = iscsi_eh_recover_target,
        .change_queue_depth       = iscsi_change_queue_depth,
        .can_queue                = 2048,
        .max_sectors              = 127,
        .cmd_per_lun              = 128,
        .this_id                  = -1,
        .use_clustering           = ENABLE_CLUSTERING,
        .sg_tablesize             = ISCSI_MAX_BDS_PER_CMD,
        .shost_attrs              = bnx2i_dev_attributes,
};

struct iscsi_transport bnx2i_iscsi_transport = {
        .owner                    = THIS_MODULE,
        .name                     = "bnx2i",
        .caps                     = CAP_RECOVERY_L0 | CAP_HDRDGST |
                                    CAP_MULTI_R2T | CAP_DATADGST |
                                    CAP_DATA_PATH_OFFLOAD |
                                    CAP_TEXT_NEGO,
        .create_session           = bnx2i_session_create,
        .destroy_session          = bnx2i_session_destroy,
        .create_conn              = bnx2i_conn_create,
        .bind_conn                = bnx2i_conn_bind,
        .destroy_conn             = bnx2i_conn_destroy,
        .attr_is_visible          = bnx2i_attr_is_visible,
        .set_param                = iscsi_set_param,
        .get_conn_param           = iscsi_conn_get_param,
        .get_session_param        = iscsi_session_get_param,
        .get_host_param           = bnx2i_host_get_param,
        .start_conn               = bnx2i_conn_start,
        .stop_conn                = iscsi_conn_stop,
        .send_pdu                 = iscsi_conn_send_pdu,
        .xmit_task                = bnx2i_task_xmit,
        .get_stats                = bnx2i_conn_get_stats,
        /* TCP connect/disconnect (offloaded, "option-2") interface calls */
        .get_ep_param             = bnx2i_ep_get_param,
        .ep_connect               = bnx2i_ep_connect,
        .ep_poll                  = bnx2i_ep_poll,
        .ep_disconnect            = bnx2i_ep_disconnect,
        .set_path                 = bnx2i_nl_set_path,
        /* error recovery timeout call */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
        .cleanup_task             = bnx2i_cleanup_task,
};