1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/module.h>
19#include <linux/mempool.h>
20#include <linux/string.h>
21#include <linux/slab.h>
22#include <linux/errno.h>
23#include <linux/init.h>
24#include <linux/pci.h>
25#include <linux/skbuff.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/workqueue.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_tcq.h>
31
32#include "snic.h"
33#include "snic_fwint.h"
34
35#define PCI_DEVICE_ID_CISCO_SNIC 0x0046
36
37
/* Supported devices: Cisco sNIC storage controller (exported below
 * via MODULE_DEVICE_TABLE for hotplug/modalias matching). */
static struct pci_device_id snic_id_table[] = {
	{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
	{ 0, }	/* end of table */
};
42
/* Bitmask of enabled log categories; writable at runtime via sysfs. */
unsigned int snic_log_level = 0x0;
module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
/* Number of pages reserved for the debugfs trace ring buffer. */
unsigned int snic_trace_max_pages = 16;
module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_trace_max_pages,
		 "Total allocated memory pages for snic trace buffer");

#endif
/* Per-LUN queue depth reported to the SCSI midlayer (capped later at
 * SNIC_MAX_QUEUE_DEPTH in snic_slave_configure()). */
unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
57
58
59
60
61
62static int
63snic_slave_alloc(struct scsi_device *sdev)
64{
65 struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
66
67 if (!tgt || snic_tgt_chkready(tgt))
68 return -ENXIO;
69
70 return 0;
71}
72
73
74
75
76
77static int
78snic_slave_configure(struct scsi_device *sdev)
79{
80 struct snic *snic = shost_priv(sdev->host);
81 u32 qdepth = 0, max_ios = 0;
82 int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;
83
84
85 max_ios = snic_max_qdepth;
86 qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
87 scsi_change_queue_depth(sdev, qdepth);
88
89 if (snic->fwinfo.io_tmo > 1)
90 tmo = snic->fwinfo.io_tmo * HZ;
91
92
93 blk_queue_rq_timeout(sdev->request_queue, tmo);
94
95 return 0;
96}
97
98static int
99snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
100{
101 struct snic *snic = shost_priv(sdev->host);
102 int qsz = 0;
103
104 qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
105 if (qsz < sdev->queue_depth)
106 atomic64_inc(&snic->s_stats.misc.qsz_rampdown);
107 else if (qsz > sdev->queue_depth)
108 atomic64_inc(&snic->s_stats.misc.qsz_rampup);
109
110 atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);
111
112 scsi_change_queue_depth(sdev, qsz);
113
114 return sdev->queue_depth;
115}
116
/* SCSI host template: I/O entry points, error-handler callbacks and
 * static limits advertised to the midlayer. can_queue may be lowered
 * at probe time from the firmware io_throttle_count. */
static struct scsi_host_template snic_host_template = {
	.module = THIS_MODULE,
	.name = SNIC_DRV_NAME,
	.queuecommand = snic_queuecommand,
	.eh_abort_handler = snic_abort_cmd,
	.eh_device_reset_handler = snic_device_reset,
	.eh_host_reset_handler = snic_host_reset,
	.slave_alloc = snic_slave_alloc,
	.slave_configure = snic_slave_configure,
	.change_queue_depth = snic_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
	.can_queue = SNIC_MAX_IO_REQ,
	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0x800,
	.shost_attrs = snic_attrs,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct snic_internal_io_state),
	.proc_name = "snic_scsi",
};
137
138
139
140
141void
142snic_handle_link_event(struct snic *snic)
143{
144 unsigned long flags;
145
146 spin_lock_irqsave(&snic->snic_lock, flags);
147 if (snic->stop_link_events) {
148 spin_unlock_irqrestore(&snic->snic_lock, flags);
149
150 return;
151 }
152 spin_unlock_irqrestore(&snic->snic_lock, flags);
153
154 queue_work(snic_glob->event_q, &snic->link_work);
155}
156
157
158
159
160
161
162
163static int
164snic_notify_set(struct snic *snic)
165{
166 int ret = 0;
167 enum vnic_dev_intr_mode intr_mode;
168
169 intr_mode = svnic_dev_get_intr_mode(snic->vdev);
170
171 if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
172 ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
173 } else {
174 SNIC_HOST_ERR(snic->shost,
175 "Interrupt mode should be setup before devcmd notify set %d\n",
176 intr_mode);
177 ret = -1;
178 }
179
180 return ret;
181}
182
183
184
185
/*
 * snic_dev_wait : kicks off a devcmd operation via *start and then polls
 * *finished until the operation completes, fails, or times out.
 *
 * Returns 0 on completion, the error from start/finished on failure, or
 * -ETIMEDOUT when the wait expires.
 */
static int
snic_dev_wait(struct vnic_dev *vdev,
	      int (*start)(struct vnic_dev *, int),
	      int (*finished)(struct vnic_dev *, int *),
	      int arg)
{
	unsigned long time;
	int ret, done;
	int retry_cnt = 0;

	ret = start(vdev, arg);
	if (ret)
		return ret;

	/*
	 * Poll in ~100ms steps for up to 2 seconds. Note the OR in the
	 * loop condition: even after the 2s deadline passes, at least
	 * three polls are guaranteed (retry_cnt < 3), so the real wait
	 * can slightly exceed the nominal deadline by design.
	 */
	time = jiffies + (HZ * 2);
	do {
		ret = finished(vdev, &done);
		if (ret)
			return ret;

		if (done)
			return 0;
		schedule_timeout_uninterruptible(HZ/10);
		++retry_cnt;
	} while (time_after(time, jiffies) || (retry_cnt < 3));

	return -ETIMEDOUT;
}
221
222
223
224
225
226
/*
 * snic_cleanup : quiesces and tears down the data path for a sNIC instance.
 * Order matters: disable the device and mask interrupts first, stop the
 * work queues, drain outstanding completions, then clean the rings and
 * release per-instance resources.
 *
 * Returns 0 on success, or the error from svnic_wq_disable().
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Drain any completions still pending in the firmware CQ (-1 = all). */
	snic_fwcq_cmpl_handler(snic, -1);

	snic_wq_cmpl_handler(snic, -1);

	/* Release buffers still posted on the rings. */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Free requests that are not tied to a SCSI command tag. */
	snic_free_all_untagged_reqs(snic);

	/* Flush/abort anything the SCSI layer still holds. */
	snic_shutdown_scsi_cleanup(snic);

	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
}
269
270
271static void
272snic_iounmap(struct snic *snic)
273{
274 if (snic->bar0.vaddr)
275 iounmap(snic->bar0.vaddr);
276}
277
278
279
280
281static int
282snic_vdev_open_done(struct vnic_dev *vdev, int *done)
283{
284 struct snic *snic = svnic_dev_priv(vdev);
285 int ret;
286 int nretries = 5;
287
288 do {
289 ret = svnic_dev_open_done(vdev, done);
290 if (ret == 0)
291 break;
292
293 SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
294 } while (nretries--);
295
296 return ret;
297}
298
299
300
301
302static int
303snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
304{
305 int ret = 0;
306
307 ret = scsi_add_host(shost, &pdev->dev);
308 if (ret) {
309 SNIC_HOST_ERR(shost,
310 "snic: scsi_add_host failed. %d\n",
311 ret);
312
313 return ret;
314 }
315
316 SNIC_BUG_ON(shost->work_q != NULL);
317 snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
318 shost->host_no);
319 shost->work_q = create_singlethread_workqueue(shost->work_q_name);
320 if (!shost->work_q) {
321 SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
322
323 ret = -ENOMEM;
324 }
325
326 return ret;
327}
328
/*
 * snic_del_host : destroys the host work queue and unregisters the host
 * from the midlayer. The early return when work_q is NULL makes this a
 * no-op for a host whose workqueue was never created — callers rely on
 * that to keep the unwind paths idempotent.
 */
static void
snic_del_host(struct Scsi_Host *shost)
{
	if (!shost->work_q)
		return;

	destroy_workqueue(shost->work_q);
	shost->work_q = NULL;
	scsi_remove_host(shost);
}
339
/* Return the current snic_state (atomic read; pairs with snic_set_state). */
int
snic_get_state(struct snic *snic)
{
	return atomic_read(&snic->state);
}
345
346void
347snic_set_state(struct snic *snic, enum snic_state state)
348{
349 SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
350 snic_state_to_str(snic_get_state(snic)),
351 snic_state_to_str(state));
352
353 atomic_set(&snic->state, state);
354}
355
356
357
358
/*
 * snic_probe : PCI probe entry point. Allocates the Scsi_Host and per-
 * instance snic, brings up the PCI device and BAR0 mapping, discovers and
 * initializes the vNIC resources, creates the request mempools, enables
 * interrupts and work queues, registers the SCSI host and kicks off target
 * discovery. Every failure unwinds in strict reverse order via the goto
 * ladder at the bottom.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int
snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *shost;
	struct snic *snic;
	mempool_t *pool;
	unsigned long flags;
	u32 max_ios = 0;
	int ret, i;

	/* Identify the device being probed. */
	SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);

	SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	/* Allocate the SCSI host with the snic instance as host-private data. */
	shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
	if (!shost) {
		SNIC_ERR("Unable to alloc scsi_host\n");
		ret = -ENOMEM;

		goto prob_end;
	}
	snic = shost_priv(shost);
	snic->shost = shost;

	snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
		 shost->host_no);

	SNIC_HOST_INFO(shost,
		       "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
		       shost->host_no, snic, shost, pdev->bus->number,
		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Per-instance stats directory under debugfs. */
	snic_stats_debugfs_init(snic);
#endif

	/* Cross-link the PCI device and the snic instance. */
	pci_set_drvdata(pdev, snic);
	snic->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot enable PCI Resources, aborting : %d\n",
			      ret);

		goto err_free_snic;
	}

	ret = pci_request_regions(pdev, SNIC_DRV_NAME);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot obtain PCI Resources, aborting : %d\n",
			      ret);

		goto err_pci_disable;
	}

	pci_set_master(pdev);

	/*
	 * Prefer a 43-bit DMA mask; fall back to 32-bit if the platform
	 * cannot satisfy it.
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "No Usable DMA Configuration, aborting %d\n",
				      ret);
			goto err_rel_regions;
		}
	}

	/* BAR0 must be a memory-mapped region. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	if (!snic->bar0.vaddr) {
		SNIC_HOST_ERR(shost,
			      "Cannot memory map BAR0 res hdr aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.bus_addr = pci_resource_start(pdev, 0);
	snic->bar0.len = pci_resource_len(pdev, 0);
	SNIC_BUG_ON(snic->bar0.bus_addr == 0);

	/* Discover the vNIC resources behind BAR0. */
	snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
	if (!snic->vdev) {
		SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");

		ret = -ENODEV;
		goto err_iounmap;
	}

	ret = svnic_dev_cmd_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);

		goto err_vnic_unreg;
	}

	/* Open the vNIC device, polling for completion. */
	ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev open failed, aborting. %d\n",
			      ret);

		goto err_vnic_unreg;
	}

	ret = svnic_dev_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev init failed. aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Pull the vNIC configuration (queue depths, limits, etc.). */
	ret = snic_get_vnic_config(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Get vNIC configuration failed, aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Derive can_queue from the configured I/O throttle count, clamped
	 * to [SNIC_MIN_IO_REQ, SNIC_MAX_IO_REQ]. */
	max_ios = snic->config.io_throttle_count;
	if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
					 max_t(u32, SNIC_MIN_IO_REQ, max_ios));

	snic->max_tag_id = shost->can_queue;

	shost->max_lun = snic->config.luns_per_tgt;
	shost->max_id = SNIC_MAX_TARGET;

	shost->max_cmd_len = MAX_COMMAND_SIZE;

	snic_get_res_counts(snic);

	/* Interrupt mode must be established before resources and
	 * devcmd notify. */
	ret = snic_set_intr_mode(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to set intr mode aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	ret = snic_alloc_vnic_res(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc vNIC resources aborting. %d\n",
			      ret);

		goto err_clear_intr;
	}

	/* Instance bookkeeping: global list node, special command list,
	 * and the per-instance locks. */
	INIT_LIST_HEAD(&snic->list);

	INIT_LIST_HEAD(&snic->spl_cmd_list);
	spin_lock_init(&snic->spl_cmd_lock);

	spin_lock_init(&snic->snic_lock);

	for (i = 0; i < SNIC_WQ_MAX; i++)
		spin_lock_init(&snic->wq_lock[i]);

	for (i = 0; i < SNIC_IO_LOCKS; i++)
		spin_lock_init(&snic->io_req_lock[i]);

	/* Request mempools (min 2 elements each) backed by the global
	 * slab caches. */
	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_res;
	}

	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_dflt_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");

		ret = -ENOMEM;
		goto err_free_max_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;

	/* Initial state and in-flight counter. */
	atomic_set(&snic->state, SNIC_INIT);

	atomic_set(&snic->ios_inflight, 0);

	/* Set up devcmd notification (requires MSI-X, checked inside). */
	ret = snic_notify_set(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc notify buffer aborting. %d\n",
			      ret);

		goto err_free_tmreq_pool;
	}

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_add_tail(&snic->list, &snic_glob->snic_list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_disc_init(&snic->disc);
	INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
	INIT_WORK(&snic->disc_work, snic_handle_disc);
	INIT_WORK(&snic->link_work, snic_handle_link);

	/* Enable the work queues, then the device itself. */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_enable(&snic->wq[i]);

	ret = svnic_dev_enable_wait(snic->vdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev enable failed w/ error %d\n",
			      ret);

		goto err_vdev_enable;
	}

	ret = snic_request_intr(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);

		goto err_req_intr;
	}

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_unmask(&snic->intr[i]);

	/* Fetch the I/O configuration from firmware. */
	ret = snic_get_conf(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to get snic io config from FW w err %d\n",
			      ret);

		goto err_get_conf;
	}

	/* Register the host with the SCSI midlayer and start discovery. */
	ret = snic_add_host(shost, pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Adding scsi host Failed ... exiting. %d\n",
			      ret);

		goto err_get_conf;
	}

	snic_set_state(snic, SNIC_ONLINE);

	ret = snic_disc_start(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
			      ret);

		goto err_get_conf;
	}

	SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");

	return 0;

	/* Unwind in reverse order of the setup above. */
err_get_conf:
	snic_free_all_untagged_reqs(snic);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	snic_free_intr(snic);

err_req_intr:
	svnic_dev_disable(snic->vdev);

err_vdev_enable:
	svnic_dev_notify_unset(snic->vdev);

	for (i = 0; i < snic->wq_count; i++) {
		int rc = 0;

		rc = svnic_wq_disable(&snic->wq[i]);
		if (rc) {
			SNIC_HOST_ERR(shost,
				      "WQ Disable Failed w/ err = %d\n", rc);

			break;
		}
	}
	/* No-op unless snic_add_host() succeeded (work_q guard inside). */
	snic_del_host(snic->shost);

err_free_tmreq_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);

err_free_max_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);

err_free_dflt_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);

err_free_res:
	snic_free_vnic_res(snic);

err_clear_intr:
	snic_clear_intr_mode(snic);

err_dev_close:
	svnic_dev_close(snic->vdev);

err_vnic_unreg:
	svnic_dev_unregister(snic->vdev);

err_iounmap:
	snic_iounmap(snic);

err_rel_regions:
	pci_release_regions(pdev);

err_pci_disable:
	pci_disable_device(pdev);

err_free_snic:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	scsi_host_put(shost);
	pci_set_drvdata(pdev, NULL);

prob_end:
	SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	return ret;
}
751
752
753
754
755
756
/*
 * snic_remove : PCI remove entry point. Marks the instance offline, stops
 * link-event processing, flushes outstanding work, quiesces the data path
 * via snic_cleanup(), then releases all resources in reverse order of
 * snic_probe().
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Transition to OFFLINE and stop new link events before flushing
	 * the global event workqueue, so no new work is queued while we
	 * tear down.
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Quiesce the hardware and drain all pending I/O. */
	snic_cleanup(snic);

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* Drops the final host reference; frees shost and the embedded snic. */
	scsi_host_put(snic->shost);
}
820
821
822struct snic_global *snic_glob;
823
824
825
826
827
828
829static int
830snic_global_data_init(void)
831{
832 int ret = 0;
833 struct kmem_cache *cachep;
834 ssize_t len = 0;
835
836 snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);
837
838 if (!snic_glob) {
839 SNIC_ERR("Failed to allocate Global Context.\n");
840
841 ret = -ENOMEM;
842 goto gdi_end;
843 }
844
845#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
846
847
848 snic_debugfs_init();
849
850
851
852 ret = snic_trc_init();
853 if (ret < 0) {
854 SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
855 snic_trc_free();
856
857 }
858
859#endif
860 INIT_LIST_HEAD(&snic_glob->snic_list);
861 spin_lock_init(&snic_glob->snic_list_lock);
862
863
864 len = sizeof(struct snic_req_info);
865 len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
866 cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
867 SLAB_HWCACHE_ALIGN, NULL);
868 if (!cachep) {
869 SNIC_ERR("Failed to create snic default sgl slab\n");
870 ret = -ENOMEM;
871
872 goto err_dflt_req_slab;
873 }
874 snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;
875
876
877 len = sizeof(struct snic_req_info);
878 len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
879 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
880 SLAB_HWCACHE_ALIGN, NULL);
881 if (!cachep) {
882 SNIC_ERR("Failed to create snic max sgl slab\n");
883 ret = -ENOMEM;
884
885 goto err_max_req_slab;
886 }
887 snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
888
889 len = sizeof(struct snic_host_req);
890 cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
891 SLAB_HWCACHE_ALIGN, NULL);
892 if (!cachep) {
893 SNIC_ERR("Failed to create snic tm req slab\n");
894 ret = -ENOMEM;
895
896 goto err_tmreq_slab;
897 }
898 snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
899
900
901 snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
902 if (!snic_glob->event_q) {
903 SNIC_ERR("snic event queue create failed\n");
904 ret = -ENOMEM;
905
906 goto err_eventq;
907 }
908
909 return ret;
910
911err_eventq:
912 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
913
914err_tmreq_slab:
915 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
916
917err_max_req_slab:
918 kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
919
920err_dflt_req_slab:
921#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
922 snic_trc_free();
923 snic_debugfs_term();
924#endif
925 kfree(snic_glob);
926 snic_glob = NULL;
927
928gdi_end:
929 return ret;
930}
931
932
933
934
/*
 * snic_global_data_cleanup : releases everything created by
 * snic_global_data_init(), in reverse order: event workqueue, slab
 * caches, then (when configured) trace buffer and debugfs tree, and
 * finally the global context itself.
 */
static void
snic_global_data_cleanup(void)
{
	SNIC_BUG_ON(snic_glob == NULL);

	destroy_workqueue(snic_glob->event_q);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Free the trace buffer first, then remove the debugfs root. */
	snic_trc_free();

	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;
}
955
/* PCI driver glue: binds snic_probe/snic_remove to the ID table above. */
static struct pci_driver snic_driver = {
	.name = SNIC_DRV_NAME,
	.id_table = snic_id_table,
	.probe = snic_probe,
	.remove = snic_remove,
};
962
963static int __init
964snic_init_module(void)
965{
966 int ret = 0;
967
968#ifndef __x86_64__
969 SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
970 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
971#endif
972
973 SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);
974
975 ret = snic_global_data_init();
976 if (ret) {
977 SNIC_ERR("Failed to Initialize Global Data.\n");
978
979 return ret;
980 }
981
982 ret = pci_register_driver(&snic_driver);
983 if (ret < 0) {
984 SNIC_ERR("PCI driver register error\n");
985
986 goto err_pci_reg;
987 }
988
989 return ret;
990
991err_pci_reg:
992 snic_global_data_cleanup();
993
994 return ret;
995}
996
/*
 * snic_cleanup_module : module exit point. Unregister the PCI driver
 * first (which runs snic_remove() for every bound device), then free
 * the driver-wide context.
 */
static void __exit
snic_cleanup_module(void)
{
	pci_unregister_driver(&snic_driver);
	snic_global_data_cleanup();
}
1003
1004module_init(snic_init_module);
1005module_exit(snic_cleanup_module);
1006
1007MODULE_LICENSE("GPL v2");
1008MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
1009MODULE_VERSION(SNIC_DRV_VERSION);
1010MODULE_DEVICE_TABLE(pci, snic_id_table);
1011MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
1012 "Sesidhar Baddela <sebaddel@cisco.com>");
1013