/*
 * bnx2i_iscsi.c: QLogic NetXtreme II iSCSI driver.
 *
 * iSCSI transport interface and Scsi_Host glue for the bnx2i
 * offloaded-iSCSI driver.
 */

#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libiscsi.h>
#include "bnx2i.h"

struct scsi_transport_template *bnx2i_scsi_xport_template;
struct iscsi_transport bnx2i_iscsi_transport;
static struct scsi_host_template bnx2i_host_template;

static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */

DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);

/**
 * bnx2i_adapter_ready - check if the adapter can accept new connections
 * @hba:	adapter instance pointer
 *
 * Returns 0 when the adapter is up and the link is up, -EPERM otherwise.
 */
static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
{
	int retval = 0;

	if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
	    test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
		retval = -EPERM;
	return retval;
}

/**
 * bnx2i_get_write_cmd_bd_idx - locate the BD that contains a buffer offset
 * @cmd:		iscsi command structure
 * @buf_off:		absolute offset into the write buffer
 * @start_bd_off:	returns the offset within the located BD
 * @start_bd_idx:	returns the index of the located BD
 *
 * Walks the command's BD table until it reaches the BD that covers
 * 'buf_off' and reports that BD's index and the remaining offset.
 */
56static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
57 u32 *start_bd_off, u32 *start_bd_idx)
58{
59 struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
60 u32 cur_offset = 0;
61 u32 cur_bd_idx = 0;
62
63 if (buf_off) {
64 while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
65 cur_offset += bd_tbl->buffer_length;
66 cur_bd_idx++;
67 bd_tbl++;
68 }
69 }
70
71 *start_bd_off = buf_off - cur_offset;
72 *start_bd_idx = cur_bd_idx;
73}

/**
 * bnx2i_setup_write_cmd_bd_info - set up BD info for a write command
 * @task:	transport layer's command structure pointer
 *
 * Identifies and records the starting BD index and offset for the first
 * unsolicited Data-Out PDU and for the first solicited (R2T) data.
 */
84static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
85{
86 struct bnx2i_cmd *cmd = task->dd_data;
87 u32 start_bd_offset;
88 u32 start_bd_idx;
89 u32 buffer_offset = 0;
90 u32 cmd_len = cmd->req.total_data_transfer_length;

	/*
	 * No immediate data and no unsolicited data means there is nothing
	 * to prepare here; everything will be solicited via R2T.
	 */
95 if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
96 return;
97
98
99 buffer_offset += task->imm_count;
100 if (task->imm_count == cmd_len)
101 return;
102
103 if (iscsi_task_has_unsol_data(task)) {
104 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
105 &start_bd_offset, &start_bd_idx);
106 cmd->req.ud_buffer_offset = start_bd_offset;
107 cmd->req.ud_start_bd_index = start_bd_idx;
108 buffer_offset += task->unsol_r2t.data_length;
109 }
110
111 if (buffer_offset != cmd_len) {
112 bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
113 &start_bd_offset, &start_bd_idx);
114 if ((start_bd_offset > task->conn->session->first_burst) ||
115 (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
116 int i = 0;
117
118 iscsi_conn_printk(KERN_ALERT, task->conn,
119 "bnx2i- error, buf offset 0x%x "
120 "bd_valid %d use_sg %d\n",
121 buffer_offset, cmd->io_tbl.bd_valid,
122 scsi_sg_count(cmd->scsi_cmd));
			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
				iscsi_conn_printk(KERN_ALERT, task->conn,
						  "bnx2i err, bd[%d]: len %x\n",
						  i,
						  cmd->io_tbl.bd_tbl[i].buffer_length);
128 }
129 cmd->req.sd_buffer_offset = start_bd_offset;
130 cmd->req.sd_start_bd_index = start_bd_idx;
131 }
132}

/**
 * bnx2i_map_scsi_sg - map the SCSI scatterlist into the command's BD table
 * @hba:	adapter instance
 * @cmd:	iscsi cmd structure
 *
 * DMA-maps the scatter-gather list and fills one BD per SG element,
 * marking the first and last entries in the chain; returns the BD count.
 */
143static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
144{
145 struct scsi_cmnd *sc = cmd->scsi_cmd;
146 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
147 struct scatterlist *sg;
148 int byte_count = 0;
149 int bd_count = 0;
150 int sg_count;
151 int sg_len;
152 u64 addr;
153 int i;
154
155 BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
156
157 sg_count = scsi_dma_map(sc);
158
159 scsi_for_each_sg(sc, sg, sg_count, i) {
160 sg_len = sg_dma_len(sg);
161 addr = (u64) sg_dma_address(sg);
162 bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
163 bd[bd_count].buffer_addr_hi = addr >> 32;
164 bd[bd_count].buffer_length = sg_len;
165 bd[bd_count].flags = 0;
166 if (bd_count == 0)
167 bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
168
169 byte_count += sg_len;
170 bd_count++;
171 }
172
173 if (bd_count)
174 bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
175
176 BUG_ON(byte_count != scsi_bufflen(sc));
177 return bd_count;
178}

/**
 * bnx2i_iscsi_map_sg_list - fill the BD table for a SCSI command
 * @cmd:	iscsi cmd structure
 *
 * Builds the BD list; if the command carries no data, a single zeroed
 * BD entry is used.
 */
186static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
187{
188 int bd_count;
189
190 bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
191 if (!bd_count) {
192 struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
193
194 bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
195 bd[0].buffer_length = bd[0].flags = 0;
196 }
197 cmd->io_tbl.bd_valid = bd_count;
198}

/**
 * bnx2i_iscsi_unmap_sg_list - unmap the SG list of a completed command
 * @cmd:	iscsi cmd structure
 */
207void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
208{
209 struct scsi_cmnd *sc = cmd->scsi_cmd;
210
211 if (cmd->io_tbl.bd_valid && sc) {
212 scsi_dma_unmap(sc);
213 cmd->io_tbl.bd_valid = 0;
214 }
215}
216
217static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
218{
219 memset(&cmd->req, 0x00, sizeof(cmd->req));
220 cmd->req.op_code = 0xFF;
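	/* placeholder opcode; the caller fills in the real iSCSI opcode */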
221 cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
222 cmd->req.bd_list_addr_hi =
223 (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
224
225}

/**
 * bnx2i_bind_conn_to_iscsi_cid - bind a connection to its iSCSI CID
 * @hba:	adapter instance
 * @bnx2i_conn:	connection to bind
 * @iscsi_cid:	iSCSI context ID
 *
 * Records the conn pointer in the hba's cid-to-conn table; fails with
 * -EBUSY if the slot is already taken.
 */
238static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
239 struct bnx2i_conn *bnx2i_conn,
240 u32 iscsi_cid)
241{
242 if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
243 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
244 "conn bind - entry #%d not free\n", iscsi_cid);
245 return -EBUSY;
246 }
247
248 hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
249 return 0;
250}

/**
 * bnx2i_get_conn_from_id - look up the connection bound to an iSCSI CID
 * @hba:	adapter instance
 * @iscsi_cid:	iSCSI context ID
 */
258struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
259 u16 iscsi_cid)
260{
261 if (!hba->cid_que.conn_cid_tbl) {
262 printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
263 return NULL;
264
265 } else if (iscsi_cid >= hba->max_active_conns) {
266 printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
267 return NULL;
268 }
269 return hba->cid_que.conn_cid_tbl[iscsi_cid];
270}

/**
 * bnx2i_alloc_iscsi_cid - allocate an iSCSI CID from the free-CID queue
 * @hba:	adapter instance
 *
 * Returns the next free CID, or -1 if none are available.
 */
277static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
278{
279 int idx;
280
281 if (!hba->cid_que.cid_free_cnt)
282 return -1;
283
284 idx = hba->cid_que.cid_q_cons_idx;
285 hba->cid_que.cid_q_cons_idx++;
286 if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
287 hba->cid_que.cid_q_cons_idx = 0;
288
289 hba->cid_que.cid_free_cnt--;
290 return hba->cid_que.cid_que[idx];
291}

/**
 * bnx2i_free_iscsi_cid - return an iSCSI CID to the free-CID queue
 * @hba:	adapter instance
 * @iscsi_cid:	CID to release
 */
299static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
300{
301 int idx;
302
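	/* (u16)-1 means no CID was ever allocated for this endpoint */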
303 if (iscsi_cid == (u16) -1)
304 return;
305
306 hba->cid_que.cid_free_cnt++;
307
308 idx = hba->cid_que.cid_q_prod_idx;
309 hba->cid_que.cid_que[idx] = iscsi_cid;
310 hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
311 hba->cid_que.cid_q_prod_idx++;
312 if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
313 hba->cid_que.cid_q_prod_idx = 0;
314}

/**
 * bnx2i_setup_free_cid_que - allocate and initialize the free-CID queue
 * @hba:	adapter instance
 *
 * Allocates the CID queue and the cid-to-conn table and seeds the queue
 * with all CIDs marked free.
 */
324static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
325{
326 int mem_size;
327 int i;
328
329 mem_size = hba->max_active_conns * sizeof(u32);
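	/* round the allocation up to a whole number of pages */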
330 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
331
332 hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
333 if (!hba->cid_que.cid_que_base)
334 return -ENOMEM;
335
336 mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
337 mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
338 hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
339 if (!hba->cid_que.conn_cid_tbl) {
340 kfree(hba->cid_que.cid_que_base);
341 hba->cid_que.cid_que_base = NULL;
342 return -ENOMEM;
343 }
344
345 hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
346 hba->cid_que.cid_q_prod_idx = 0;
347 hba->cid_que.cid_q_cons_idx = 0;
348 hba->cid_que.cid_q_max_idx = hba->max_active_conns;
349 hba->cid_que.cid_free_cnt = hba->max_active_conns;
350
351 for (i = 0; i < hba->max_active_conns; i++) {
352 hba->cid_que.cid_que[i] = i;
353 hba->cid_que.conn_cid_tbl[i] = NULL;
354 }
355 return 0;
356}

/**
 * bnx2i_release_free_cid_que - free the CID queue and cid-to-conn table
 * @hba:	adapter instance
 */
363static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
364{
365 kfree(hba->cid_que.cid_que_base);
366 hba->cid_que.cid_que_base = NULL;
367
368 kfree(hba->cid_que.conn_cid_tbl);
369 hba->cid_que.conn_cid_tbl = NULL;
370}

/**
 * bnx2i_alloc_ep - allocate an endpoint (transport identifier) structure
 * @hba:	adapter instance
 *
 * Creates an iscsi_endpoint, initializes the bnx2i private part and
 * bumps the count of active offloaded connections.
 */
381static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
382{
383 struct iscsi_endpoint *ep;
384 struct bnx2i_endpoint *bnx2i_ep;
385 u32 ec_div;
386
387 ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
388 if (!ep) {
389 printk(KERN_ERR "bnx2i: Could not allocate ep\n");
390 return NULL;
391 }
392
393 bnx2i_ep = ep->dd_data;
394 bnx2i_ep->cls_ep = ep;
395 INIT_LIST_HEAD(&bnx2i_ep->link);
396 bnx2i_ep->state = EP_STATE_IDLE;
397 bnx2i_ep->ep_iscsi_cid = (u16) -1;
398 bnx2i_ep->hba = hba;
399 bnx2i_ep->hba_age = hba->age;
400
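	/* ec_shift = log2(event_coal_div), used for CQ event coalescing */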
401 ec_div = event_coal_div;
402 while (ec_div >>= 1)
403 bnx2i_ep->ec_shift += 1;
404
405 hba->ofld_conns_active++;
406 init_waitqueue_head(&bnx2i_ep->ofld_wait);
407 return ep;
408}

/**
 * bnx2i_free_ep - free an endpoint and release its iSCSI CID
 * @ep:	endpoint (transport identifier) structure
 */
415static void bnx2i_free_ep(struct iscsi_endpoint *ep)
416{
417 struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
418 unsigned long flags;
419
420 spin_lock_irqsave(&bnx2i_resc_lock, flags);
421 bnx2i_ep->state = EP_STATE_IDLE;
422 bnx2i_ep->hba->ofld_conns_active--;
423
424 if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
425 bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
426
427 if (bnx2i_ep->conn) {
428 bnx2i_ep->conn->ep = NULL;
429 bnx2i_ep->conn = NULL;
430 }
431
432 bnx2i_ep->hba = NULL;
433 spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
434 iscsi_destroy_endpoint(ep);
435}

/**
 * bnx2i_alloc_bdt - allocate a per-command buffer descriptor table
 * @hba:	adapter instance
 * @session:	iscsi session
 * @cmd:	iscsi cmd structure
 *
 * Allocates a DMA-coherent BD table sized for ISCSI_MAX_BDS_PER_CMD.
 */
444static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
445 struct bnx2i_cmd *cmd)
446{
447 struct io_bdt *io = &cmd->io_tbl;
448 struct iscsi_bd *bd;
449
450 io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
451 ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
452 &io->bd_tbl_dma, GFP_KERNEL);
453 if (!io->bd_tbl) {
454 iscsi_session_printk(KERN_ERR, session, "Could not "
455 "allocate bdt.\n");
456 return -ENOMEM;
457 }
458 io->bd_valid = 0;
459 return 0;
460}

/**
 * bnx2i_destroy_cmd_pool - free the BD tables of all session commands
 * @hba:	adapter instance
 * @session:	iscsi session
 */
468static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
469 struct iscsi_session *session)
470{
471 int i;
472
473 for (i = 0; i < session->cmds_max; i++) {
474 struct iscsi_task *task = session->cmds[i];
475 struct bnx2i_cmd *cmd = task->dd_data;
476
477 if (cmd->io_tbl.bd_tbl)
478 dma_free_coherent(&hba->pcidev->dev,
479 ISCSI_MAX_BDS_PER_CMD *
480 sizeof(struct iscsi_bd),
481 cmd->io_tbl.bd_tbl,
482 cmd->io_tbl.bd_tbl_dma);
483 }
484
485}

/**
 * bnx2i_setup_cmd_pool - allocate per-command resources for a session
 * @hba:	adapter instance
 * @session:	iscsi session
 *
 * Points each task at its private header and allocates its BD table.
 */
493static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
494 struct iscsi_session *session)
495{
496 int i;
497
498 for (i = 0; i < session->cmds_max; i++) {
499 struct iscsi_task *task = session->cmds[i];
500 struct bnx2i_cmd *cmd = task->dd_data;
501
502 task->hdr = &cmd->hdr;
503 task->hdr_max = sizeof(struct iscsi_hdr);
504
505 if (bnx2i_alloc_bdt(hba, session, cmd))
506 goto free_bdts;
507 }
508
509 return 0;
510
511free_bdts:
512 bnx2i_destroy_cmd_pool(hba, session);
513 return -ENOMEM;
514}

/**
 * bnx2i_setup_mp_bdt - allocate the middle-path BD table and dummy buffer
 * @hba:	adapter instance
 *
 * The middle-path BD table is used for iSCSI PDUs that do not carry a
 * real data buffer.
 */
524static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
525{
526 int rc = 0;
527 struct iscsi_bd *mp_bdt;
528 u64 addr;
529
530 hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
531 &hba->mp_bd_dma, GFP_KERNEL);
532 if (!hba->mp_bd_tbl) {
533 printk(KERN_ERR "unable to allocate Middle Path BDT\n");
534 rc = -1;
535 goto out;
536 }
537
538 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
539 CNIC_PAGE_SIZE,
540 &hba->dummy_buf_dma, GFP_KERNEL);
541 if (!hba->dummy_buffer) {
542 printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
543 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
544 hba->mp_bd_tbl, hba->mp_bd_dma);
545 hba->mp_bd_tbl = NULL;
546 rc = -1;
547 goto out;
548 }
549
550 mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
551 addr = (unsigned long) hba->dummy_buf_dma;
552 mp_bdt->buffer_addr_lo = addr & 0xffffffff;
553 mp_bdt->buffer_addr_hi = addr >> 32;
554 mp_bdt->buffer_length = CNIC_PAGE_SIZE;
555 mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
556 ISCSI_BD_FIRST_IN_BD_CHAIN;
557out:
558 return rc;
559}

/**
 * bnx2i_free_mp_bdt - free the middle-path BD table and dummy buffer
 * @hba:	adapter instance
 */
568static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
569{
570 if (hba->mp_bd_tbl) {
571 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
572 hba->mp_bd_tbl, hba->mp_bd_dma);
573 hba->mp_bd_tbl = NULL;
574 }
575 if (hba->dummy_buffer) {
576 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
577 hba->dummy_buffer, hba->dummy_buf_dma);
578 hba->dummy_buffer = NULL;
579 }
580 return;
581}

/**
 * bnx2i_drop_session - fail an iscsi session to force recovery
 * @cls_session:	iscsi class session pointer
 *
 * Forces a connection-failure event on the session so that libiscsi
 * starts recovery/teardown.
 */
594void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
595{
596 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
597}

/**
 * bnx2i_ep_destroy_list_add - add an endpoint to the conn-destroy list
 * @hba:	adapter instance
 * @ep:		endpoint being cleaned up
 */
606static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
607 struct bnx2i_endpoint *ep)
608{
609 write_lock_bh(&hba->ep_rdwr_lock);
610 list_add_tail(&ep->link, &hba->ep_destroy_list);
611 write_unlock_bh(&hba->ep_rdwr_lock);
612 return 0;
613}

/**
 * bnx2i_ep_destroy_list_del - remove an endpoint from the conn-destroy list
 * @hba:	adapter instance
 * @ep:		endpoint being cleaned up
 */
623static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
624 struct bnx2i_endpoint *ep)
625{
626 write_lock_bh(&hba->ep_rdwr_lock);
627 list_del_init(&ep->link);
628 write_unlock_bh(&hba->ep_rdwr_lock);
629
630 return 0;
631}

/**
 * bnx2i_ep_ofld_list_add - add an endpoint to the offload-pending list
 * @hba:	adapter instance
 * @ep:		endpoint being offloaded
 */
640static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
641 struct bnx2i_endpoint *ep)
642{
643 write_lock_bh(&hba->ep_rdwr_lock);
644 list_add_tail(&ep->link, &hba->ep_ofld_list);
645 write_unlock_bh(&hba->ep_rdwr_lock);
646 return 0;
647}

/**
 * bnx2i_ep_ofld_list_del - remove an endpoint from the offload-pending list
 * @hba:	adapter instance
 * @ep:		endpoint being offloaded
 */
656static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
657 struct bnx2i_endpoint *ep)
658{
659 write_lock_bh(&hba->ep_rdwr_lock);
660 list_del_init(&ep->link);
661 write_unlock_bh(&hba->ep_rdwr_lock);
662 return 0;
663}

/**
 * bnx2i_find_ep_in_ofld_list - find an offload-pending endpoint by iSCSI CID
 * @hba:	adapter instance
 * @iscsi_cid:	iSCSI context ID to search for
 */
673struct bnx2i_endpoint *
674bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
675{
676 struct list_head *list;
677 struct list_head *tmp;
678 struct bnx2i_endpoint *ep = NULL;
679
680 read_lock_bh(&hba->ep_rdwr_lock);
681 list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
682 ep = (struct bnx2i_endpoint *)list;
683
684 if (ep->ep_iscsi_cid == iscsi_cid)
685 break;
686 ep = NULL;
687 }
688 read_unlock_bh(&hba->ep_rdwr_lock);
689
690 if (!ep)
691 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
692 return ep;
693}

/**
 * bnx2i_find_ep_in_destroy_list - find an endpoint on the destroy list by CID
 * @hba:	adapter instance
 * @iscsi_cid:	iSCSI context ID to search for
 */
701struct bnx2i_endpoint *
702bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
703{
704 struct list_head *list;
705 struct list_head *tmp;
706 struct bnx2i_endpoint *ep = NULL;
707
708 read_lock_bh(&hba->ep_rdwr_lock);
709 list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
710 ep = (struct bnx2i_endpoint *)list;
711
712 if (ep->ep_iscsi_cid == iscsi_cid)
713 break;
714 ep = NULL;
715 }
716 read_unlock_bh(&hba->ep_rdwr_lock);
717
718 if (!ep)
719 printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
720
721 return ep;
722}

/**
 * bnx2i_ep_active_list_add - add an endpoint to the active-connection list
 * @hba:	adapter instance
 * @ep:		endpoint to add
 */
731static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
732 struct bnx2i_endpoint *ep)
733{
734 write_lock_bh(&hba->ep_rdwr_lock);
735 list_add_tail(&ep->link, &hba->ep_active_list);
736 write_unlock_bh(&hba->ep_rdwr_lock);
737}

/**
 * bnx2i_ep_active_list_del - remove an endpoint from the active list
 * @hba:	adapter instance
 * @ep:		endpoint to remove
 */
747static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba,
748 struct bnx2i_endpoint *ep)
749{
750 write_lock_bh(&hba->ep_rdwr_lock);
751 list_del_init(&ep->link);
752 write_unlock_bh(&hba->ep_rdwr_lock);
753}

/**
 * bnx2i_setup_host_queue_size - set Scsi_Host can_queue based on device type
 * @hba:	adapter instance
 * @shost:	scsi host being prepared
 */
765static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
766 struct Scsi_Host *shost)
767{
768 if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
769 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
770 else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
771 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
772 else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
773 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
774 else
775 shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
776}

/**
 * bnx2i_alloc_hba - allocate and initialize an adapter instance
 * @cnic:	cnic device handle
 *
 * Allocates the Scsi_Host and hba structure, maps registers, sets up the
 * middle-path BDT, CID queue and queue-size limits, and registers the host.
 */
786struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
787{
788 struct Scsi_Host *shost;
789 struct bnx2i_hba *hba;
790
791 shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
792 if (!shost)
793 return NULL;
794 shost->dma_boundary = cnic->pcidev->dma_mask;
795 shost->transportt = bnx2i_scsi_xport_template;
796 shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
797 shost->max_channel = 0;
798 shost->max_lun = 512;
799 shost->max_cmd_len = 16;
800
801 hba = iscsi_host_priv(shost);
802 hba->shost = shost;
803 hba->netdev = cnic->netdev;
804
805 hba->pcidev = cnic->pcidev;
806 pci_dev_get(hba->pcidev);
807 hba->pci_did = hba->pcidev->device;
808 hba->pci_vid = hba->pcidev->vendor;
809 hba->pci_sdid = hba->pcidev->subsystem_device;
810 hba->pci_svid = hba->pcidev->subsystem_vendor;
811 hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
812 hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
813
814 bnx2i_identify_device(hba, cnic);
815 bnx2i_setup_host_queue_size(hba, shost);
816
817 hba->reg_base = pci_resource_start(hba->pcidev, 0);
818 if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
819 hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
820 if (!hba->regview)
821 goto ioreg_map_err;
822 } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
823 hba->regview = pci_iomap(hba->pcidev, 0, 4096);
824 if (!hba->regview)
825 goto ioreg_map_err;
826 }
827
828 if (bnx2i_setup_mp_bdt(hba))
829 goto mp_bdt_mem_err;
830
831 INIT_LIST_HEAD(&hba->ep_ofld_list);
832 INIT_LIST_HEAD(&hba->ep_active_list);
833 INIT_LIST_HEAD(&hba->ep_destroy_list);
834 rwlock_init(&hba->ep_rdwr_lock);
835
836 hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
837
838
839 hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
840
841 if (bnx2i_setup_free_cid_que(hba))
842 goto cid_que_err;
843
844
845 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
846 if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
847 hba->max_sqes = sq_size;
848 else
849 hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
850 } else {
851 if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
852 hba->max_sqes = sq_size;
853 else
854 hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
855 }
856
857 hba->max_rqes = rq_size;
858 hba->max_cqes = hba->max_sqes + rq_size;
859 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
860 if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
861 hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
862 } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
863 hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
864
865 hba->num_ccell = hba->max_sqes / 2;
866
867 spin_lock_init(&hba->lock);
868 mutex_init(&hba->net_dev_lock);
869 init_waitqueue_head(&hba->eh_wait);
870 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
871 hba->hba_shutdown_tmo = 30 * HZ;
872 hba->conn_teardown_tmo = 20 * HZ;
873 hba->conn_ctx_destroy_tmo = 6 * HZ;
874 } else {
875 hba->hba_shutdown_tmo = 20 * HZ;
876 hba->conn_teardown_tmo = 10 * HZ;
877 hba->conn_ctx_destroy_tmo = 2 * HZ;
878 }
879
880#ifdef CONFIG_32BIT
881 spin_lock_init(&hba->stat_lock);
882#endif
883 memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
884
885 if (iscsi_host_add(shost, &hba->pcidev->dev))
886 goto free_dump_mem;
887 return hba;
888
889free_dump_mem:
890 bnx2i_release_free_cid_que(hba);
891cid_que_err:
892 bnx2i_free_mp_bdt(hba);
893mp_bdt_mem_err:
894 if (hba->regview) {
895 pci_iounmap(hba->pcidev, hba->regview);
896 hba->regview = NULL;
897 }
898ioreg_map_err:
899 pci_dev_put(hba->pcidev);
900 scsi_host_put(shost);
901 return NULL;
902}

/**
 * bnx2i_free_hba - release all resources owned by an adapter instance
 * @hba:	adapter instance to free
 */
910void bnx2i_free_hba(struct bnx2i_hba *hba)
911{
912 struct Scsi_Host *shost = hba->shost;
913
914 iscsi_host_remove(shost);
915 INIT_LIST_HEAD(&hba->ep_ofld_list);
916 INIT_LIST_HEAD(&hba->ep_active_list);
917 INIT_LIST_HEAD(&hba->ep_destroy_list);
918
919 if (hba->regview) {
920 pci_iounmap(hba->pcidev, hba->regview);
921 hba->regview = NULL;
922 }
923 pci_dev_put(hba->pcidev);
924 bnx2i_free_mp_bdt(hba);
925 bnx2i_release_free_cid_que(hba);
926 iscsi_host_free(shost);
927}

/**
 * bnx2i_conn_free_login_resources - free login/generic-PDU resources
 * @hba:	adapter instance
 * @bnx2i_conn:	iscsi connection
 *
 * Frees the generic-PDU request/response buffers and their BD tables.
 */
936static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
937 struct bnx2i_conn *bnx2i_conn)
938{
939 if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
940 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
941 bnx2i_conn->gen_pdu.resp_bd_tbl,
942 bnx2i_conn->gen_pdu.resp_bd_dma);
943 bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
944 }
945
946 if (bnx2i_conn->gen_pdu.req_bd_tbl) {
947 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
948 bnx2i_conn->gen_pdu.req_bd_tbl,
949 bnx2i_conn->gen_pdu.req_bd_dma);
950 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
951 }
952
953 if (bnx2i_conn->gen_pdu.resp_buf) {
954 dma_free_coherent(&hba->pcidev->dev,
955 ISCSI_DEF_MAX_RECV_SEG_LEN,
956 bnx2i_conn->gen_pdu.resp_buf,
957 bnx2i_conn->gen_pdu.resp_dma_addr);
958 bnx2i_conn->gen_pdu.resp_buf = NULL;
959 }
960
961 if (bnx2i_conn->gen_pdu.req_buf) {
962 dma_free_coherent(&hba->pcidev->dev,
963 ISCSI_DEF_MAX_RECV_SEG_LEN,
964 bnx2i_conn->gen_pdu.req_buf,
965 bnx2i_conn->gen_pdu.req_dma_addr);
966 bnx2i_conn->gen_pdu.req_buf = NULL;
967 }
968}

/**
 * bnx2i_conn_alloc_login_resources - allocate login/generic-PDU resources
 * @hba:	adapter instance
 * @bnx2i_conn:	iscsi connection
 *
 * Allocates DMA-coherent buffers and BD tables used to exchange
 * login, text, nop, TMF and logout PDUs with the firmware.
 */
977static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
978 struct bnx2i_conn *bnx2i_conn)
979{
980
981 bnx2i_conn->gen_pdu.req_buf =
982 dma_alloc_coherent(&hba->pcidev->dev,
983 ISCSI_DEF_MAX_RECV_SEG_LEN,
984 &bnx2i_conn->gen_pdu.req_dma_addr,
985 GFP_KERNEL);
986 if (bnx2i_conn->gen_pdu.req_buf == NULL)
987 goto login_req_buf_failure;
988
989 bnx2i_conn->gen_pdu.req_buf_size = 0;
990 bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
991
992 bnx2i_conn->gen_pdu.resp_buf =
993 dma_alloc_coherent(&hba->pcidev->dev,
994 ISCSI_DEF_MAX_RECV_SEG_LEN,
995 &bnx2i_conn->gen_pdu.resp_dma_addr,
996 GFP_KERNEL);
997 if (bnx2i_conn->gen_pdu.resp_buf == NULL)
998 goto login_resp_buf_failure;
999
1000 bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
1001 bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
1002
1003 bnx2i_conn->gen_pdu.req_bd_tbl =
1004 dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1005 &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
1006 if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
1007 goto login_req_bd_tbl_failure;
1008
1009 bnx2i_conn->gen_pdu.resp_bd_tbl =
1010 dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1011 &bnx2i_conn->gen_pdu.resp_bd_dma,
1012 GFP_KERNEL);
1013 if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
1014 goto login_resp_bd_tbl_failure;
1015
1016 return 0;
1017
1018login_resp_bd_tbl_failure:
1019 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1020 bnx2i_conn->gen_pdu.req_bd_tbl,
1021 bnx2i_conn->gen_pdu.req_bd_dma);
1022 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
1023
1024login_req_bd_tbl_failure:
1025 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
1026 bnx2i_conn->gen_pdu.resp_buf,
1027 bnx2i_conn->gen_pdu.resp_dma_addr);
1028 bnx2i_conn->gen_pdu.resp_buf = NULL;
1029login_resp_buf_failure:
1030 dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
1031 bnx2i_conn->gen_pdu.req_buf,
1032 bnx2i_conn->gen_pdu.req_dma_addr);
1033 bnx2i_conn->gen_pdu.req_buf = NULL;
1034login_req_buf_failure:
1035 iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
1036 "login resource alloc failed!!\n");
1037 return -ENOMEM;
1038
1039}

/**
 * bnx2i_iscsi_prep_generic_pdu_bd - prepare BDs for a generic (mgmt) PDU
 * @bnx2i_conn:	iscsi connection
 *
 * Points the request BD at the data written so far and the response BD
 * at the full response buffer.
 */
1049static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
1050{
1051 struct iscsi_bd *bd_tbl;
1052
1053 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
1054
1055 bd_tbl->buffer_addr_hi =
1056 (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
1057 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
1058 bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
1059 bnx2i_conn->gen_pdu.req_buf;
1060 bd_tbl->reserved0 = 0;
1061 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1062 ISCSI_BD_FIRST_IN_BD_CHAIN;
1063
1064 bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
1065 bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
1066 bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
1067 bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
1068 bd_tbl->reserved0 = 0;
1069 bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
1070 ISCSI_BD_FIRST_IN_BD_CHAIN;
1071}

/**
 * bnx2i_iscsi_send_generic_request - send a non-SCSI-command PDU
 * @task:	transport layer task pointer
 *
 * Dispatches login, nop-out, logout, TMF and text requests to the
 * corresponding WQE builders.
 */
1081static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
1082{
1083 struct bnx2i_cmd *cmd = task->dd_data;
1084 struct bnx2i_conn *bnx2i_conn = cmd->conn;
1085 int rc = 0;
1086 char *buf;
1087 int data_len;
1088
1089 bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
1090 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
1091 case ISCSI_OP_LOGIN:
1092 bnx2i_send_iscsi_login(bnx2i_conn, task);
1093 break;
1094 case ISCSI_OP_NOOP_OUT:
1095 data_len = bnx2i_conn->gen_pdu.req_buf_size;
1096 buf = bnx2i_conn->gen_pdu.req_buf;
1097 if (data_len)
1098 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1099 buf, data_len, 1);
1100 else
1101 rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
1102 NULL, 0, 1);
1103 break;
1104 case ISCSI_OP_LOGOUT:
1105 rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
1106 break;
1107 case ISCSI_OP_SCSI_TMFUNC:
1108 rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
1109 break;
1110 case ISCSI_OP_TEXT:
1111 rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
1112 break;
1113 default:
1114 iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
1115 "send_gen: unsupported op 0x%x\n",
1116 task->hdr->opcode);
1117 }
1118 return rc;
1119}

/**
 * bnx2i_cpy_scsi_cdb - copy the LUN and CDB into the command WQE
 * @sc:		SCSI command pointer
 * @cmd:	iscsi cmd pointer
 */
1131static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
1132{
1133 u32 dword;
1134 int lpcnt;
1135 u8 *srcp;
1136 u32 *dstp;
1137 u32 scsi_lun[2];
1138
1139 int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
1140 cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
1141 cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
1142
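	/* copy the CDB one 32-bit word at a time, converting to big endian */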
1143 lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
1144 srcp = (u8 *) sc->cmnd;
1145 dstp = (u32 *) cmd->req.cdb;
1146 while (lpcnt--) {
1147 memcpy(&dword, (const void *) srcp, 4);
1148 *dstp = cpu_to_be32(dword);
1149 srcp += 4;
1150 dstp++;
1151 }
1152 if (sc->cmd_len & 0x3) {
1153 dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
1154 *dstp = cpu_to_be32(dword);
1155 }
1156}
1157
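/**
 * bnx2i_cleanup_task - release per-task resources when a task ends
 * @task:	transport layer task pointer
 *
 * Unmaps the SG list and, for a task terminated by TMF abort, issues a
 * firmware cleanup request and waits for its completion.
 */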
1158static void bnx2i_cleanup_task(struct iscsi_task *task)
1159{
1160 struct iscsi_conn *conn = task->conn;
1161 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1162 struct bnx2i_hba *hba = bnx2i_conn->hba;
1163
	/*
	 * Nothing to do if the task has no SCSI command attached or was
	 * never started on the hardware.
	 */
1167 if (!task->sc || task->state == ISCSI_TASK_PENDING)
1168 return;

	/* the command was aborted via TMF; ask the firmware to clean it up */
	if (task->state == ISCSI_TASK_ABRT_TMF) {
		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);

		/* drop the session locks while waiting for the cleanup CQE */
1175 spin_unlock_bh(&conn->session->back_lock);
1176 spin_unlock_bh(&conn->session->frwd_lock);
1177 wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
1178 msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
1179 spin_lock_bh(&conn->session->frwd_lock);
1180 spin_lock_bh(&conn->session->back_lock);
1181 }
1182 bnx2i_iscsi_unmap_sg_list(task->dd_data);
1183}

/**
 * bnx2i_mtask_xmit - transmit a management (non-SCSI-command) task
 * @conn:	iscsi connection
 * @task:	transport layer task pointer
 *
 * Copies the task data into the generic PDU buffer and sends it.
 */
1190static int
1191bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
1192{
1193 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1194 struct bnx2i_hba *hba = bnx2i_conn->hba;
1195 struct bnx2i_cmd *cmd = task->dd_data;
1196
1197 memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
1198
1199 bnx2i_setup_cmd_wqe_template(cmd);
1200 bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
1201
1202
1203 ADD_STATS_64(hba, tx_pdus, 1);
1204 ADD_STATS_64(hba, tx_bytes, task->data_count);
1205
1206 if (task->data_count) {
1207 memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
1208 task->data_count);
1209 bnx2i_conn->gen_pdu.req_wr_ptr =
1210 bnx2i_conn->gen_pdu.req_buf + task->data_count;
1211 }
1212 cmd->conn = conn->dd_data;
1213 cmd->scsi_cmd = NULL;
1214 return bnx2i_iscsi_send_generic_request(task);
1215}

/**
 * bnx2i_task_xmit - transmit an iscsi command or management task
 * @task:	transport layer task pointer
 *
 * Builds the SCSI command WQE (opcode, CDB, BD list, R2T/unsolicited
 * data info) and posts it, or hands management PDUs to bnx2i_mtask_xmit().
 */
1223static int bnx2i_task_xmit(struct iscsi_task *task)
1224{
1225 struct iscsi_conn *conn = task->conn;
1226 struct iscsi_session *session = conn->session;
1227 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
1228 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1229 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1230 struct scsi_cmnd *sc = task->sc;
1231 struct bnx2i_cmd *cmd = task->dd_data;
1232 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
1233
1234 if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
1235 hba->max_sqes)
1236 return -ENOMEM;
1237
	/* no scsi_cmnd attached: this is a management PDU (login, nop, etc.) */
1241 if (!sc)
1242 return bnx2i_mtask_xmit(conn, task);
1243
1244 bnx2i_setup_cmd_wqe_template(cmd);
1245 cmd->req.op_code = ISCSI_OP_SCSI_CMD;
1246 cmd->conn = bnx2i_conn;
1247 cmd->scsi_cmd = sc;
1248 cmd->req.total_data_transfer_length = scsi_bufflen(sc);
1249 cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
1250
1251 bnx2i_iscsi_map_sg_list(cmd);
1252 bnx2i_cpy_scsi_cdb(sc, cmd);
1253
1254 cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
1255 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1256 cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
1257 cmd->req.itt = task->itt |
1258 (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1259 bnx2i_setup_write_cmd_bd_info(task);
1260 } else {
1261 if (scsi_bufflen(sc))
1262 cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
1263 cmd->req.itt = task->itt |
1264 (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
1265 }
1266
1267 cmd->req.num_bds = cmd->io_tbl.bd_valid;
1268 if (!cmd->io_tbl.bd_valid) {
1269 cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
1270 cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
1271 cmd->req.num_bds = 1;
1272 }
1273
1274 bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
1275 return 0;
1276}

/**
 * bnx2i_session_create - create a new iscsi session on this adapter
 * @ep:		endpoint previously connected via ep_connect
 * @cmds_max:	requested queue depth
 * @qdepth:	device queue depth
 * @initial_cmdsn:	initial iscsi CmdSN for this session
 *
 * Clamps cmds_max to the adapter limits and sets up the command pool.
 */
1286static struct iscsi_cls_session *
1287bnx2i_session_create(struct iscsi_endpoint *ep,
1288 uint16_t cmds_max, uint16_t qdepth,
1289 uint32_t initial_cmdsn)
1290{
1291 struct Scsi_Host *shost;
1292 struct iscsi_cls_session *cls_session;
1293 struct bnx2i_hba *hba;
1294 struct bnx2i_endpoint *bnx2i_ep;
1295
1296 if (!ep) {
1297 printk(KERN_ERR "bnx2i: missing ep.\n");
1298 return NULL;
1299 }
1300
1301 bnx2i_ep = ep->dd_data;
1302 shost = bnx2i_ep->hba->shost;
1303 hba = iscsi_host_priv(shost);
1304 if (bnx2i_adapter_ready(hba))
1305 return NULL;
1306
	/*
	 * Clamp the requested queue depth to what the adapter SQ supports.
	 */
1311 if (cmds_max > hba->max_sqes)
1312 cmds_max = hba->max_sqes;
1313 else if (cmds_max < BNX2I_SQ_WQES_MIN)
1314 cmds_max = BNX2I_SQ_WQES_MIN;
1315
1316 cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
1317 cmds_max, 0, sizeof(struct bnx2i_cmd),
1318 initial_cmdsn, ISCSI_MAX_TARGET);
1319 if (!cls_session)
1320 return NULL;
1321
1322 if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
1323 goto session_teardown;
1324 return cls_session;
1325
1326session_teardown:
1327 iscsi_session_teardown(cls_session);
1328 return NULL;
1329}

/**
 * bnx2i_session_destroy - destroy an iscsi session and its command pool
 * @cls_session:	iscsi class session pointer
 */
1339static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
1340{
1341 struct iscsi_session *session = cls_session->dd_data;
1342 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1343 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1344
1345 bnx2i_destroy_cmd_pool(hba, session);
1346 iscsi_session_teardown(cls_session);
1347}

/**
 * bnx2i_conn_create - create a new iscsi connection on a session
 * @cls_session:	iscsi class session pointer
 * @cid:		iscsi connection id (within the session)
 *
 * Allocates the connection and its login/generic-PDU resources.
 */
1357static struct iscsi_cls_conn *
1358bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
1359{
1360 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1361 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1362 struct bnx2i_conn *bnx2i_conn;
1363 struct iscsi_cls_conn *cls_conn;
1364 struct iscsi_conn *conn;
1365
1366 cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
1367 cid);
1368 if (!cls_conn)
1369 return NULL;
1370 conn = cls_conn->dd_data;
1371
1372 bnx2i_conn = conn->dd_data;
1373 bnx2i_conn->cls_conn = cls_conn;
1374 bnx2i_conn->hba = hba;
1375
1376 atomic_set(&bnx2i_conn->work_cnt, 0);
1377
1378
1379 bnx2i_conn->ep = NULL;
1380 init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
1381
1382 if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
1383 iscsi_conn_printk(KERN_ALERT, conn,
1384 "conn_new: login resc alloc failed!!\n");
1385 goto free_conn;
1386 }
1387
1388 return cls_conn;
1389
1390free_conn:
1391 iscsi_conn_teardown(cls_conn);
1392 return NULL;
1393}

/**
 * bnx2i_conn_bind - bind an iscsi connection to an offloaded TCP endpoint
 * @cls_session:	iscsi class session pointer
 * @cls_conn:		iscsi class connection pointer
 * @transport_fd:	endpoint handle returned by ep_connect
 * @is_leading:		set if this is the session's leading connection
 *
 * Validates that the endpoint belongs to this adapter and is still
 * connected, then wires conn <-> ep and registers the iSCSI CID.
 */
1407static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
1408 struct iscsi_cls_conn *cls_conn,
1409 uint64_t transport_fd, int is_leading)
1410{
1411 struct iscsi_conn *conn = cls_conn->dd_data;
1412 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1413 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1414 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1415 struct bnx2i_endpoint *bnx2i_ep;
1416 struct iscsi_endpoint *ep;
1417 int ret_code;
1418
1419 ep = iscsi_lookup_endpoint(transport_fd);
1420 if (!ep)
1421 return -EINVAL;
1422
1423
1424
1425
1426 if (bnx2i_adapter_ready(hba))
1427 return -EIO;
1428
1429 bnx2i_ep = ep->dd_data;
1430 if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
1431 (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
		/* the peer has already closed the TCP connection */
1433 return -EINVAL;
1434
1435 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1436 return -EINVAL;
1437
1438 if (bnx2i_ep->hba != hba) {
		/* the endpoint was offloaded on a different adapter */
1441 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1442 "conn bind, ep=0x%p (%s) does not",
1443 bnx2i_ep, bnx2i_ep->hba->netdev->name);
1444 iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
1445 "belong to hba (%s)\n",
1446 hba->netdev->name);
1447 return -EEXIST;
1448 }
1449 bnx2i_ep->conn = bnx2i_conn;
1450 bnx2i_conn->ep = bnx2i_ep;
1451 bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
1452 bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
1453
1454 ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
1455 bnx2i_ep->ep_iscsi_cid);

	/*
	 * 57710: the RQ producer index needs to be explicitly replenished
	 * when the connection is bound.
	 */
1460 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1461 bnx2i_put_rq_buf(bnx2i_conn, 0);
1462
1463 bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
1464 return ret_code;
1465}

/**
 * bnx2i_conn_destroy - tear down an iscsi connection
 * @cls_conn:	iscsi class connection pointer
 *
 * Frees login resources and discards any per-CPU work items still
 * queued for this connection before destroying it.
 */
1475static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
1476{
1477 struct iscsi_conn *conn = cls_conn->dd_data;
1478 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1479 struct Scsi_Host *shost;
1480 struct bnx2i_hba *hba;
1481 struct bnx2i_work *work, *tmp;
1482 unsigned cpu = 0;
1483 struct bnx2i_percpu_s *p;
1484
1485 shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
1486 hba = iscsi_host_priv(shost);
1487
1488 bnx2i_conn_free_login_resources(hba, bnx2i_conn);
1489
1490 if (atomic_read(&bnx2i_conn->work_cnt)) {
1491 for_each_online_cpu(cpu) {
1492 p = &per_cpu(bnx2i_percpu, cpu);
1493 spin_lock_bh(&p->p_work_lock);
1494 list_for_each_entry_safe(work, tmp,
1495 &p->work_list, list) {
1496 if (work->session == conn->session &&
1497 work->bnx2i_conn == bnx2i_conn) {
1498 list_del_init(&work->list);
1499 kfree(work);
1500 if (!atomic_dec_and_test(
1501 &bnx2i_conn->work_cnt))
1502 break;
1503 }
1504 }
1505 spin_unlock_bh(&p->p_work_lock);
1506 }
1507 }
1508
1509 iscsi_conn_teardown(cls_conn);
1510}

/**
 * bnx2i_ep_get_param - return an endpoint (TCP connection) parameter
 * @ep:		endpoint (transport identifier) structure
 * @param:	parameter type identifier
 * @buf:	buffer to return the parameter text in
 *
 * Reports the remote port and address of the offloaded TCP connection.
 */
1521static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
1522 enum iscsi_param param, char *buf)
1523{
1524 struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
1525 struct bnx2i_hba *hba = bnx2i_ep->hba;
1526 int len = -ENOTCONN;
1527
1528 if (!hba)
1529 return -ENOTCONN;
1530
1531 switch (param) {
1532 case ISCSI_PARAM_CONN_PORT:
1533 mutex_lock(&hba->net_dev_lock);
1534 if (bnx2i_ep->cm_sk)
1535 len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
1536 mutex_unlock(&hba->net_dev_lock);
1537 break;
1538 case ISCSI_PARAM_CONN_ADDRESS:
1539 mutex_lock(&hba->net_dev_lock);
1540 if (bnx2i_ep->cm_sk)
1541 len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
1542 mutex_unlock(&hba->net_dev_lock);
1543 break;
1544 default:
1545 return -ENOSYS;
1546 }
1547
1548 return len;
1549}

/**
 * bnx2i_host_get_param - return an iscsi host parameter
 * @shost:	scsi host pointer
 * @param:	parameter type identifier
 * @buf:	buffer to return the parameter text in
 */
1557static int bnx2i_host_get_param(struct Scsi_Host *shost,
1558 enum iscsi_host_param param, char *buf)
1559{
1560 struct bnx2i_hba *hba = iscsi_host_priv(shost);
1561 int len = 0;
1562
1563 switch (param) {
1564 case ISCSI_HOST_PARAM_HWADDRESS:
1565 len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
1566 break;
1567 case ISCSI_HOST_PARAM_NETDEV_NAME:
1568 len = sprintf(buf, "%s\n", hba->netdev->name);
1569 break;
1570 case ISCSI_HOST_PARAM_IPADDRESS: {
1571 struct list_head *active_list = &hba->ep_active_list;
1572
1573 read_lock_bh(&hba->ep_rdwr_lock);
1574 if (!list_empty(&hba->ep_active_list)) {
1575 struct bnx2i_endpoint *bnx2i_ep;
1576 struct cnic_sock *csk;
1577
1578 bnx2i_ep = list_first_entry(active_list,
1579 struct bnx2i_endpoint,
1580 link);
1581 csk = bnx2i_ep->cm_sk;
1582 if (test_bit(SK_F_IPV6, &csk->flags))
1583 len = sprintf(buf, "%pI6\n", csk->src_ip);
1584 else
1585 len = sprintf(buf, "%pI4\n", csk->src_ip);
1586 }
1587 read_unlock_bh(&hba->ep_rdwr_lock);
1588 break;
1589 }
1590 default:
1591 return iscsi_host_get_param(shost, param, buf);
1592 }
1593 return len;
1594}

/**
 * bnx2i_conn_start - start the iscsi connection (full-feature phase)
 * @cls_conn:	iscsi class connection pointer
 *
 * Updates the firmware connection context and then lets libiscsi move
 * the connection to full-feature phase.
 */
1602static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
1603{
1604 struct iscsi_conn *conn = cls_conn->dd_data;
1605 struct bnx2i_conn *bnx2i_conn = conn->dd_data;
1606
1607 bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
1608 bnx2i_update_iscsi_conn(conn);
1609
	/*
	 * Wait for the firmware CONN_UPDATE completion; the offload timer
	 * bounds the wait in case the completion never arrives.
	 */
1614 timer_setup(&bnx2i_conn->ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
1615 bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
1616 add_timer(&bnx2i_conn->ep->ofld_timer);
1617
1618 wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
1619 bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
1620
1621 if (signal_pending(current))
1622 flush_signals(current);
1623 del_timer_sync(&bnx2i_conn->ep->ofld_timer);
1624
1625 iscsi_conn_start(cls_conn);
1626 return 0;
1627}

/**
 * bnx2i_conn_get_stats - return per-connection iscsi statistics
 * @cls_conn:	iscsi class connection pointer
 * @stats:	structure to fill in
 */
1635static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1636 struct iscsi_stats *stats)
1637{
1638 struct iscsi_conn *conn = cls_conn->dd_data;
1639
1640 stats->txdata_octets = conn->txdata_octets;
1641 stats->rxdata_octets = conn->rxdata_octets;
1642 stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
1643 stats->dataout_pdus = conn->dataout_pdus_cnt;
1644 stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
1645 stats->datain_pdus = conn->datain_pdus_cnt;
1646 stats->r2t_pdus = conn->r2t_pdus_cnt;
1647 stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
1648 stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
1649 stats->digest_err = 0;
1650 stats->timeout_err = 0;
1651 strcpy(stats->custom[0].desc, "eh_abort_cnt");
1652 stats->custom[0].value = conn->eh_abort_cnt;
1653 stats->custom_length = 1;
1654}

/**
 * bnx2i_check_route - find a NetXtreme II device that can reach a destination
 * @dst_addr:	target's socket address
 *
 * Returns the owning hba if a suitable cnic device is found, the adapter
 * is ready and its MTU is sufficient; NULL otherwise.
 */
1663static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
1664{
1665 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1666 struct bnx2i_hba *hba;
1667 struct cnic_dev *cnic = NULL;
1668
1669 hba = get_adapter_list_head();
1670 if (hba && hba->cnic)
1671 cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
1672 if (!cnic) {
1673 printk(KERN_ALERT "bnx2i: no route,"
1674 "can't connect using cnic\n");
1675 goto no_nx2_route;
1676 }
1677 hba = bnx2i_find_hba_for_cnic(cnic);
1678 if (!hba)
1679 goto no_nx2_route;
1680
1681 if (bnx2i_adapter_ready(hba)) {
1682 printk(KERN_ALERT "bnx2i: check route, hba not found\n");
1683 goto no_nx2_route;
1684 }
1685 if (hba->netdev->mtu > hba->mtu_supported) {
1686 printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
1687 hba->netdev->name, hba->netdev->mtu);
1688 printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
1689 hba->mtu_supported);
1690 goto no_nx2_route;
1691 }
1692 return hba;
1693no_nx2_route:
1694 return NULL;
1695}

/**
 * bnx2i_tear_down_conn - destroy the offloaded connection context
 * @hba:	adapter instance
 * @ep:		endpoint being torn down
 *
 * Destroys the cm socket and asks the firmware to destroy the iscsi
 * connection context, waiting (bounded) for the completion.
 */
1705static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
1706 struct bnx2i_endpoint *ep)
1707{
1708 if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
1709 hba->cnic->cm_destroy(ep->cm_sk);
1710
1711 if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
1712 ep->state == EP_STATE_DISCONN_TIMEDOUT) {
1713 if (ep->conn && ep->conn->cls_conn &&
1714 ep->conn->cls_conn->dd_data) {
1715 struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;

			/* stop processing any further rx PDUs on this conn */
1718 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
1719 }
1720
1721
1722
1723
1724 printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
1725 "please submit GRC Dump, NW/PCIe trace, "
1726 "driver msgs to developers for analysis\n",
1727 hba->netdev->name);
1728 }
1729
1730 ep->state = EP_STATE_CLEANUP_START;
1731 timer_setup(&ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
1732 ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
1733 add_timer(&ep->ofld_timer);
1734
1735 bnx2i_ep_destroy_list_add(hba, ep);
1736
1737
1738 if (bnx2i_send_conn_destroy(hba, ep))
1739 ep->state = EP_STATE_CLEANUP_CMPL;
1740
1741 wait_event_interruptible(ep->ofld_wait,
1742 (ep->state != EP_STATE_CLEANUP_START));
1743
1744 if (signal_pending(current))
1745 flush_signals(current);
1746 del_timer_sync(&ep->ofld_timer);
1747
1748 bnx2i_ep_destroy_list_del(hba, ep);
1749
1750 if (ep->state != EP_STATE_CLEANUP_CMPL)
1751
1752 printk(KERN_ALERT "bnx2i - conn destroy failed\n");
1753
1754 return 0;
1755}

/**
 * bnx2i_ep_connect - establish an offloaded TCP connection to the target
 * @shost:	scsi host to use, may be NULL
 * @dst_addr:	target's socket address
 * @non_blocking:	set if the caller will not block for completion
 *
 * Allocates an endpoint and iSCSI CID, offloads the connection context to
 * the firmware, creates the cm socket and initiates the TCP connect.
 */
1770static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
1771 struct sockaddr *dst_addr,
1772 int non_blocking)
1773{
1774 u32 iscsi_cid = BNX2I_CID_RESERVED;
1775 struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
1776 struct sockaddr_in6 *desti6;
1777 struct bnx2i_endpoint *bnx2i_ep;
1778 struct bnx2i_hba *hba;
1779 struct cnic_dev *cnic;
1780 struct cnic_sockaddr saddr;
1781 struct iscsi_endpoint *ep;
1782 int rc = 0;
1783
1784 if (shost) {
1785
1786 hba = iscsi_host_priv(shost);
1787 } else
		/*
		 * No shost given; pick a NetXtreme II device that has a
		 * route to the destination.
		 */
1792 hba = bnx2i_check_route(dst_addr);
1793
1794 if (!hba) {
1795 rc = -EINVAL;
1796 goto nohba;
1797 }
1798 mutex_lock(&hba->net_dev_lock);
1799
1800 if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
1801 rc = -EPERM;
1802 goto check_busy;
1803 }
1804 cnic = hba->cnic;
1805 ep = bnx2i_alloc_ep(hba);
1806 if (!ep) {
1807 rc = -ENOMEM;
1808 goto check_busy;
1809 }
1810 bnx2i_ep = ep->dd_data;
1811
1812 atomic_set(&bnx2i_ep->num_active_cmds, 0);
1813 iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
1814 if (iscsi_cid == -1) {
1815 printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
1816 "iscsi cid\n", hba->netdev->name);
1817 rc = -ENOMEM;
1818 bnx2i_free_ep(ep);
1819 goto check_busy;
1820 }
1821 bnx2i_ep->hba_age = hba->age;
1822
1823 rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
1824 if (rc != 0) {
1825 printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
1826 "\n", hba->netdev->name);
1827 rc = -ENOMEM;
1828 goto qp_resc_err;
1829 }
1830
1831 bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
1832 bnx2i_ep->state = EP_STATE_OFLD_START;
1833 bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
1834
1835 timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
1836 bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
1837 add_timer(&bnx2i_ep->ofld_timer);
1838
1839 if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
1840 if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
1841 printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
1842 hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
1843 rc = -EBUSY;
1844 } else
1845 rc = -ENOSPC;
1846 printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
1847 "\n", hba->netdev->name);
1848 bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1849 goto conn_failed;
1850 }
1851
1852
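	/* wait for the offload to complete or the offload timer to expire */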
1853 wait_event_interruptible(bnx2i_ep->ofld_wait,
1854 bnx2i_ep->state != EP_STATE_OFLD_START);
1855
1856 if (signal_pending(current))
1857 flush_signals(current);
1858 del_timer_sync(&bnx2i_ep->ofld_timer);
1859
1860 bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
1861
1862 if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
1863 if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
1864 printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
1865 hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
1866 rc = -EBUSY;
1867 } else
1868 rc = -ENOSPC;
1869 goto conn_failed;
1870 }
1871
1872 rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
1873 iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
1874 if (rc) {
1875 rc = -EINVAL;
1876
1877 goto release_ep;
1878 }
1879
1880 bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
1881 bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
1882 clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
1883
1884 memset(&saddr, 0, sizeof(saddr));
1885 if (dst_addr->sa_family == AF_INET) {
1886 desti = (struct sockaddr_in *) dst_addr;
1887 saddr.remote.v4 = *desti;
1888 saddr.local.v4.sin_family = desti->sin_family;
1889 } else if (dst_addr->sa_family == AF_INET6) {
1890 desti6 = (struct sockaddr_in6 *) dst_addr;
1891 saddr.remote.v6 = *desti6;
1892 saddr.local.v6.sin6_family = desti6->sin6_family;
1893 }
1894
1895 bnx2i_ep->timestamp = jiffies;
1896 bnx2i_ep->state = EP_STATE_CONNECT_START;
1897 if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
1898 rc = -EINVAL;
1899 goto conn_failed;
1900 } else
1901 rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
1902 if (rc)
1903 goto release_ep;
1904
1905 bnx2i_ep_active_list_add(hba, bnx2i_ep);
1906
1907 rc = bnx2i_map_ep_dbell_regs(bnx2i_ep);
1908 if (rc)
1909 goto del_active_ep;
1910
1911 mutex_unlock(&hba->net_dev_lock);
1912 return ep;
1913
1914del_active_ep:
1915 bnx2i_ep_active_list_del(hba, bnx2i_ep);
1916release_ep:
1917 if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
1918 mutex_unlock(&hba->net_dev_lock);
1919 return ERR_PTR(rc);
1920 }
1921conn_failed:
1922 bnx2i_free_qp_resc(hba, bnx2i_ep);
1923qp_resc_err:
1924 bnx2i_free_ep(ep);
1925check_busy:
1926 mutex_unlock(&hba->net_dev_lock);
1927nohba:
1928 return ERR_PTR(rc);
1929}

/**
 * bnx2i_ep_poll - poll for completion of an endpoint connect
 * @ep:		endpoint (transport identifier) structure
 * @timeout_ms:	how long to wait, in milliseconds
 *
 * Returns 1 when the TCP connection is up, 0 on timeout and a negative
 * value if the connect or offload failed.
 */
1939static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1940{
1941 struct bnx2i_endpoint *bnx2i_ep;
1942 int rc = 0;
1943
1944 bnx2i_ep = ep->dd_data;
1945 if ((bnx2i_ep->state == EP_STATE_IDLE) ||
1946 (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
1947 (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
1948 return -1;
1949 if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
1950 return 1;
1951
1952 rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
1953 ((bnx2i_ep->state ==
1954 EP_STATE_OFLD_FAILED) ||
1955 (bnx2i_ep->state ==
1956 EP_STATE_CONNECT_FAILED) ||
1957 (bnx2i_ep->state ==
1958 EP_STATE_CONNECT_COMPL)),
1959 msecs_to_jiffies(timeout_ms));
1960 if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
1961 rc = -1;
1962
1963 if (rc > 0)
1964 return 1;
1965 else if (!rc)
1966 return 0;
1967 else
1968 return rc;
1969}

/**
 * bnx2i_ep_tcp_conn_active - check if the offloaded TCP connection is active
 * @bnx2i_ep:	endpoint (transport identifier) structure
 *
 * Used to decide whether a disconnect handshake is still required before
 * destroying the connection context.
 */
1978static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
1979{
1980 int ret;
1981 int cnic_dev_10g = 0;
1982
1983 if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
1984 cnic_dev_10g = 1;
1985
1986 switch (bnx2i_ep->state) {
1987 case EP_STATE_CLEANUP_FAILED:
1988 case EP_STATE_OFLD_FAILED:
1989 case EP_STATE_DISCONN_TIMEDOUT:
1990 ret = 0;
1991 break;
1992 case EP_STATE_CONNECT_START:
1993 case EP_STATE_CONNECT_FAILED:
1994 case EP_STATE_CONNECT_COMPL:
1995 case EP_STATE_ULP_UPDATE_START:
1996 case EP_STATE_ULP_UPDATE_COMPL:
1997 case EP_STATE_TCP_FIN_RCVD:
1998 case EP_STATE_LOGOUT_SENT:
1999 case EP_STATE_LOGOUT_RESP_RCVD:
2000 case EP_STATE_ULP_UPDATE_FAILED:
2001 ret = 1;
2002 break;
2003 case EP_STATE_TCP_RST_RCVD:
2004 if (cnic_dev_10g)
2005 ret = 0;
2006 else
2007 ret = 1;
2008 break;
2009 default:
2010 ret = 0;
2011 }
2012
2013 return ret;
2014}

/**
 * bnx2i_hw_ep_disconnect - disconnect an offloaded TCP connection
 * @bnx2i_ep:	endpoint (transport identifier) structure
 *
 * Closes or aborts the TCP connection, waits for the disconnect to
 * complete and then destroys the firmware connection context.
 */
2023int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
2024{
2025 struct bnx2i_hba *hba = bnx2i_ep->hba;
2026 struct cnic_dev *cnic;
2027 struct iscsi_session *session = NULL;
2028 struct iscsi_conn *conn = NULL;
2029 int ret = 0;
2030 int close = 0;
2031 int close_ret = 0;
2032
2033 if (!hba)
2034 return 0;
2035
2036 cnic = hba->cnic;
2037 if (!cnic)
2038 return 0;
2039
2040 if (bnx2i_ep->state == EP_STATE_IDLE ||
2041 bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
2042 return 0;
2043
2044 if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
2045 goto destroy_conn;
2046
2047 if (bnx2i_ep->conn) {
2048 conn = bnx2i_ep->conn->cls_conn->dd_data;
2049 session = conn->session;
2050 }
2051
2052 timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0);
2053 bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies;
2054 add_timer(&bnx2i_ep->ofld_timer);
2055
2056 if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
2057 goto out;
2058
2059 if (session) {
2060 spin_lock_bh(&session->frwd_lock);
2061 if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
2062 if (session->state == ISCSI_STATE_LOGGING_OUT) {
2063 if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
2064
2065 printk(KERN_ALERT "bnx2i (%s): WARNING"
2066 " logout response was not "
2067 "received!\n",
2068 bnx2i_ep->hba->netdev->name);
2069 } else if (bnx2i_ep->state ==
2070 EP_STATE_LOGOUT_RESP_RCVD)
2071 close = 1;
2072 }
2073 } else
2074 close = 1;
2075
2076 spin_unlock_bh(&session->frwd_lock);
2077 }
2078
2079 bnx2i_ep->state = EP_STATE_DISCONN_START;
2080
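	/* graceful close if the logout handshake completed, else abort */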
2081 if (close)
2082 close_ret = cnic->cm_close(bnx2i_ep->cm_sk);
2083 else
2084 close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);
2085
2086 if (close_ret)
2087 printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
2088 bnx2i_ep->hba->netdev->name, close, close_ret);
2089 else
2090
2091 wait_event_interruptible(bnx2i_ep->ofld_wait,
2092 ((bnx2i_ep->state != EP_STATE_DISCONN_START)
2093 && (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD)));
2094
2095 if (signal_pending(current))
2096 flush_signals(current);
2097 del_timer_sync(&bnx2i_ep->ofld_timer);
2098
2099destroy_conn:
2100 bnx2i_ep_active_list_del(hba, bnx2i_ep);
2101 if (bnx2i_tear_down_conn(hba, bnx2i_ep))
2102 return -EINVAL;
2103out:
2104 bnx2i_ep->state = EP_STATE_IDLE;
2105 return ret;
2106}

/**
 * bnx2i_ep_disconnect - disconnect an endpoint and free its resources
 * @ep:		endpoint (transport identifier) structure
 *
 * Suspends the iscsi queue, tears down the hardware connection if needed
 * and releases the endpoint's QP resources and CID.
 */
2115static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
2116{
2117 struct bnx2i_endpoint *bnx2i_ep;
2118 struct bnx2i_conn *bnx2i_conn = NULL;
2119 struct iscsi_conn *conn = NULL;
2120 struct bnx2i_hba *hba;
2121
2122 bnx2i_ep = ep->dd_data;
2123
	/*
	 * Wait (bounded) for an in-progress TCP connect attempt to finish
	 * before starting teardown.
	 */
2128 while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
2129 !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
2130 msleep(250);
2131
2132 if (bnx2i_ep->conn) {
2133 bnx2i_conn = bnx2i_ep->conn;
2134 conn = bnx2i_conn->cls_conn->dd_data;
2135 iscsi_suspend_queue(conn);
2136 }
2137 hba = bnx2i_ep->hba;
2138
2139 mutex_lock(&hba->net_dev_lock);
2140
2141 if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
2142 goto out;
2143
2144 if (bnx2i_ep->state == EP_STATE_IDLE)
2145 goto free_resc;
2146
2147 if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
2148 (bnx2i_ep->hba_age != hba->age)) {
2149 bnx2i_ep_active_list_del(hba, bnx2i_ep);
2150 goto free_resc;
2151 }
2152
2153
2154 if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
2155 mutex_unlock(&hba->net_dev_lock);
2156 return;
2157 }
2158free_resc:
2159 bnx2i_free_qp_resc(hba, bnx2i_ep);
2160
2161 if (bnx2i_conn)
2162 bnx2i_conn->ep = NULL;
2163
2164 bnx2i_free_ep(ep);
2165out:
2166 mutex_unlock(&hba->net_dev_lock);
2167
2168 wake_up_interruptible(&hba->eh_wait);
2169}

/**
 * bnx2i_nl_set_path - handle an ISCSI_UEVENT_PATH_UPDATE from userspace
 * @shost:	scsi host pointer
 * @params:	iscsi path update parameters
 */
2177static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
2178{
2179 struct bnx2i_hba *hba = iscsi_host_priv(shost);
2180 char *buf = (char *) params;
2181 u16 len = sizeof(*params);
2182
2183
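	/* hand the path update down to the cnic device for this adapter */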
2184 hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
2185 len);
2186
2187 return 0;
2188}
2189
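/*
 * bnx2i_attr_is_visible - controls which iSCSI/host sysfs attributes
 * are exported for this transport
 */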
2190static umode_t bnx2i_attr_is_visible(int param_type, int param)
2191{
2192 switch (param_type) {
2193 case ISCSI_HOST_PARAM:
2194 switch (param) {
2195 case ISCSI_HOST_PARAM_NETDEV_NAME:
2196 case ISCSI_HOST_PARAM_HWADDRESS:
2197 case ISCSI_HOST_PARAM_IPADDRESS:
2198 return S_IRUGO;
2199 default:
2200 return 0;
2201 }
2202 case ISCSI_PARAM:
2203 switch (param) {
2204 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2205 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2206 case ISCSI_PARAM_HDRDGST_EN:
2207 case ISCSI_PARAM_DATADGST_EN:
2208 case ISCSI_PARAM_CONN_ADDRESS:
2209 case ISCSI_PARAM_CONN_PORT:
2210 case ISCSI_PARAM_EXP_STATSN:
2211 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2212 case ISCSI_PARAM_PERSISTENT_PORT:
2213 case ISCSI_PARAM_PING_TMO:
2214 case ISCSI_PARAM_RECV_TMO:
2215 case ISCSI_PARAM_INITIAL_R2T_EN:
2216 case ISCSI_PARAM_MAX_R2T:
2217 case ISCSI_PARAM_IMM_DATA_EN:
2218 case ISCSI_PARAM_FIRST_BURST:
2219 case ISCSI_PARAM_MAX_BURST:
2220 case ISCSI_PARAM_PDU_INORDER_EN:
2221 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2222 case ISCSI_PARAM_ERL:
2223 case ISCSI_PARAM_TARGET_NAME:
2224 case ISCSI_PARAM_TPGT:
2225 case ISCSI_PARAM_USERNAME:
2226 case ISCSI_PARAM_PASSWORD:
2227 case ISCSI_PARAM_USERNAME_IN:
2228 case ISCSI_PARAM_PASSWORD_IN:
2229 case ISCSI_PARAM_FAST_ABORT:
2230 case ISCSI_PARAM_ABORT_TMO:
2231 case ISCSI_PARAM_LU_RESET_TMO:
2232 case ISCSI_PARAM_TGT_RESET_TMO:
2233 case ISCSI_PARAM_IFACE_NAME:
2234 case ISCSI_PARAM_INITIATOR_NAME:
2235 case ISCSI_PARAM_BOOT_ROOT:
2236 case ISCSI_PARAM_BOOT_NIC:
2237 case ISCSI_PARAM_BOOT_TARGET:
2238 return S_IRUGO;
2239 default:
2240 return 0;
2241 }
2242 }
2243
2244 return 0;
2245}

/*
 * 'Scsi_Host' template registered for iSCSI sessions handled by bnx2i
 */
static struct scsi_host_template bnx2i_host_template = {
	.module			= THIS_MODULE,
	.name			= "QLogic Offload iSCSI Initiator",
	.proc_name		= "bnx2i",
	.queuecommand		= iscsi_queuecommand,
	.eh_timed_out		= iscsi_eh_cmd_timed_out,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.change_queue_depth	= scsi_change_queue_depth,
	.target_alloc		= iscsi_target_alloc,
	.can_queue		= 2048,
	.max_sectors		= 127,
	.cmd_per_lun		= 128,
	.this_id		= -1,
	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
	.shost_attrs		= bnx2i_dev_attributes,
	.track_queue_depth	= 1,
};
2270
struct iscsi_transport bnx2i_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= "bnx2i",
	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
				  CAP_MULTI_R2T | CAP_DATADGST |
				  CAP_DATA_PATH_OFFLOAD |
				  CAP_TEXT_NEGO,
	.create_session		= bnx2i_session_create,
	.destroy_session	= bnx2i_session_destroy,
	.create_conn		= bnx2i_conn_create,
	.bind_conn		= bnx2i_conn_bind,
	.destroy_conn		= bnx2i_conn_destroy,
	.attr_is_visible	= bnx2i_attr_is_visible,
	.set_param		= iscsi_set_param,
	.get_conn_param		= iscsi_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.get_host_param		= bnx2i_host_get_param,
	.start_conn		= bnx2i_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.send_pdu		= iscsi_conn_send_pdu,
	.xmit_task		= bnx2i_task_xmit,
	.get_stats		= bnx2i_conn_get_stats,
	/* TCP connect/disconnect (offloaded endpoint) interface calls */
	.get_ep_param		= bnx2i_ep_get_param,
	.ep_connect		= bnx2i_ep_connect,
	.ep_poll		= bnx2i_ep_poll,
	.ep_disconnect		= bnx2i_ep_disconnect,
	.set_path		= bnx2i_nl_set_path,
	/* error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.cleanup_task		= bnx2i_cleanup_task,
};
2303