1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/module.h>
19#include <linux/mempool.h>
20#include <linux/string.h>
21#include <linux/slab.h>
22#include <linux/errno.h>
23#include <linux/init.h>
24#include <linux/pci.h>
25#include <linux/skbuff.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <linux/workqueue.h>
29#include <linux/if_ether.h>
30#include <scsi/fc/fc_fip.h>
31#include <scsi/scsi_host.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_transport_fc.h>
34#include <scsi/scsi_tcq.h>
35#include <scsi/libfc.h>
36#include <scsi/fc_frame.h>
37
38#include "vnic_dev.h"
39#include "vnic_intr.h"
40#include "vnic_stats.h"
41#include "fnic_io.h"
42#include "fnic_fip.h"
43#include "fnic.h"
44
/* PCI device ID of the Cisco fnic (FCoE HBA) function. */
#define PCI_DEVICE_ID_CISCO_FNIC 0x0045

/* Period of the link-event polling timer armed in MSI mode
 * (see fnic_notify_timer_start()). */
#define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ)

/* Slab caches backing the per-I/O request and SG-list mempools. */
static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;

/* All fnic instances, guarded by fnic_list_lock. */
LIST_HEAD(fnic_list);
DEFINE_SPINLOCK(fnic_list_lock);
54
55
56static struct pci_device_id fnic_id_table[] = {
57 { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
58 { 0, }
59};
60
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Abhijeet Joglekar <abjoglek@cisco.com>, "
	      "Joseph R. Eykholt <jeykholt@cisco.com>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, fnic_id_table);

/* Bit mask selecting which FNIC_*_LOGGING categories are emitted. */
unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");

/* Budget of copy-WQ completion entries processed per invocation
 * (passed to fnic_wq_copy_cmpl_handler()). */
unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS;
module_param(io_completions, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(io_completions, "Max CQ entries to process at a time");

/* Sizing knobs for the two trace facilities set up at module init. */
unsigned int fnic_trace_max_pages = 16;
module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
		 "for fnic trace buffer");

unsigned int fnic_fc_trace_max_pages = 64;
module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_fc_trace_max_pages,
		 "Total allocated memory pages for fc trace buffer");

/* Queue depth applied to each LUN in fnic_slave_alloc(). */
static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
90
/*
 * libfc hooks: frame transmission, port-ID assignment and outstanding
 * SCSI I/O cleanup are all routed into this driver.
 */
static struct libfc_function_template fnic_transport_template = {
	.frame_send = fnic_send,
	.lport_set_port_id = fnic_set_port_id,
	.fcp_abort_io = fnic_empty_scsi_cleanup,
	.fcp_cleanup = fnic_empty_scsi_cleanup,
	.exch_mgr_reset = fnic_exch_mgr_reset
};
98
99static int fnic_slave_alloc(struct scsi_device *sdev)
100{
101 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
102
103 if (!rport || fc_remote_port_chkready(rport))
104 return -ENXIO;
105
106 scsi_change_queue_depth(sdev, fnic_max_qdepth);
107 return 0;
108}
109
/* SCSI midlayer host template for fnic adapters. */
static struct scsi_host_template fnic_host_template = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.queuecommand = fnic_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = fnic_abort_cmd,
	.eh_device_reset_handler = fnic_device_reset,
	.eh_host_reset_handler = fnic_host_reset,
	.slave_alloc = fnic_slave_alloc,
	.change_queue_depth = scsi_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = 3,
	/* can_queue may be raised in fnic_probe() from the vNIC config */
	.can_queue = FNIC_DFLT_IO_REQ,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0xffff,
	.shost_attrs = fnic_attrs,
	.track_queue_depth = 1,
};
129
130static void
131fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
132{
133 if (timeout)
134 rport->dev_loss_tmo = timeout;
135 else
136 rport->dev_loss_tmo = 1;
137}
138
/* Forward declarations needed by the FC transport template below. */
static void fnic_get_host_speed(struct Scsi_Host *shost);
static struct scsi_transport_template *fnic_fc_transport;
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
static void fnic_reset_host_stats(struct Scsi_Host *);
143
/*
 * FC transport template: the sysfs attributes this host exposes and
 * the driver callbacks backing them.
 */
static struct fc_function_template fnic_fc_functions = {

	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_supported_fc4s = 1,
	.show_host_active_fc4s = 1,
	.show_host_maxframe_size = 1,
	.show_host_port_id = 1,
	.show_host_supported_speeds = 1,
	.get_host_speed = fnic_get_host_speed,
	.show_host_speed = 1,
	.show_host_port_type = 1,
	.get_host_port_state = fc_get_host_port_state,
	.show_host_port_state = 1,
	.show_host_symbolic_name = 1,
	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,
	.show_host_fabric_name = 1,
	.show_starget_node_name = 1,
	.show_starget_port_name = 1,
	.show_starget_port_id = 1,
	.show_rport_dev_loss_tmo = 1,
	.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
	.issue_fc_host_lip = fnic_reset,
	.get_fc_host_stats = fnic_get_stats,
	.reset_fc_host_stats = fnic_reset_host_stats,
	.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
	.terminate_rport_io = fnic_terminate_rport_io,
	.bsg_request = fc_lport_bsg_request,
};
175
176static void fnic_get_host_speed(struct Scsi_Host *shost)
177{
178 struct fc_lport *lp = shost_priv(shost);
179 struct fnic *fnic = lport_priv(lp);
180 u32 port_speed = vnic_dev_port_speed(fnic->vdev);
181
182
183 switch (port_speed) {
184 case DCEM_PORTSPEED_10G:
185 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
186 break;
187 case DCEM_PORTSPEED_20G:
188 fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
189 break;
190 case DCEM_PORTSPEED_25G:
191 fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
192 break;
193 case DCEM_PORTSPEED_40G:
194 case DCEM_PORTSPEED_4x10G:
195 fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
196 break;
197 case DCEM_PORTSPEED_100G:
198 fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
199 break;
200 default:
201 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
202 break;
203 }
204}
205
206static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
207{
208 int ret;
209 struct fc_lport *lp = shost_priv(host);
210 struct fnic *fnic = lport_priv(lp);
211 struct fc_host_statistics *stats = &lp->host_stats;
212 struct vnic_stats *vs;
213 unsigned long flags;
214
215 if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
216 return stats;
217 fnic->stats_time = jiffies;
218
219 spin_lock_irqsave(&fnic->fnic_lock, flags);
220 ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
221 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
222
223 if (ret) {
224 FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
225 "fnic: Get vnic stats failed"
226 " 0x%x", ret);
227 return stats;
228 }
229 vs = fnic->stats;
230 stats->tx_frames = vs->tx.tx_unicast_frames_ok;
231 stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
232 stats->rx_frames = vs->rx.rx_unicast_frames_ok;
233 stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
234 stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
235 stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
236 stats->invalid_crc_count = vs->rx.rx_crc_errors;
237 stats->seconds_since_last_reset =
238 (jiffies - fnic->stats_reset_time) / HZ;
239 stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
240 stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
241
242 return stats;
243}
244
245
246
247
248
/*
 * fnic_dump_fchost_stats() - log every field of @stats at NOTICE
 * level.  Called from fnic_reset_host_stats() so the counters are
 * preserved in the log before being zeroed.
 */
void fnic_dump_fchost_stats(struct Scsi_Host *host,
			    struct fc_host_statistics *stats)
{
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: seconds since last reset = %llu\n",
		       stats->seconds_since_last_reset);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: tx frames		= %llu\n",
		       stats->tx_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: tx words		= %llu\n",
		       stats->tx_words);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: rx frames		= %llu\n",
		       stats->rx_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: rx words		= %llu\n",
		       stats->rx_words);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: lip count		= %llu\n",
		       stats->lip_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: nos count		= %llu\n",
		       stats->nos_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: error frames	= %llu\n",
		       stats->error_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: dumped frames	= %llu\n",
		       stats->dumped_frames);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: link failure count	= %llu\n",
		       stats->link_failure_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: loss of sync count	= %llu\n",
		       stats->loss_of_sync_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: loss of signal count	= %llu\n",
		       stats->loss_of_signal_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: prim seq protocol err count = %llu\n",
		       stats->prim_seq_protocol_err_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: invalid tx word count= %llu\n",
		       stats->invalid_tx_word_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: invalid crc count	= %llu\n",
		       stats->invalid_crc_count);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp input requests	= %llu\n",
		       stats->fcp_input_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp output requests	= %llu\n",
		       stats->fcp_output_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp control requests	= %llu\n",
		       stats->fcp_control_requests);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp input megabytes	= %llu\n",
		       stats->fcp_input_megabytes);
	FNIC_MAIN_NOTE(KERN_NOTICE, host,
		       "fnic: fcp output megabytes	= %llu\n",
		       stats->fcp_output_megabytes);
	return;
}
314
315
316
317
318
319static void fnic_reset_host_stats(struct Scsi_Host *host)
320{
321 int ret;
322 struct fc_lport *lp = shost_priv(host);
323 struct fnic *fnic = lport_priv(lp);
324 struct fc_host_statistics *stats;
325 unsigned long flags;
326
327
328 stats = fnic_get_stats(host);
329 fnic_dump_fchost_stats(host, stats);
330
331 spin_lock_irqsave(&fnic->fnic_lock, flags);
332 ret = vnic_dev_stats_clear(fnic->vdev);
333 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
334
335 if (ret) {
336 FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
337 "fnic: Reset vnic stats failed"
338 " 0x%x", ret);
339 return;
340 }
341 fnic->stats_reset_time = jiffies;
342 memset(stats, 0, sizeof(*stats));
343
344 return;
345}
346
347void fnic_log_q_error(struct fnic *fnic)
348{
349 unsigned int i;
350 u32 error_status;
351
352 for (i = 0; i < fnic->raw_wq_count; i++) {
353 error_status = ioread32(&fnic->wq[i].ctrl->error_status);
354 if (error_status)
355 shost_printk(KERN_ERR, fnic->lport->host,
356 "WQ[%d] error_status"
357 " %d\n", i, error_status);
358 }
359
360 for (i = 0; i < fnic->rq_count; i++) {
361 error_status = ioread32(&fnic->rq[i].ctrl->error_status);
362 if (error_status)
363 shost_printk(KERN_ERR, fnic->lport->host,
364 "RQ[%d] error_status"
365 " %d\n", i, error_status);
366 }
367
368 for (i = 0; i < fnic->wq_copy_count; i++) {
369 error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
370 if (error_status)
371 shost_printk(KERN_ERR, fnic->lport->host,
372 "CWQ[%d] error_status"
373 " %d\n", i, error_status);
374 }
375}
376
377void fnic_handle_link_event(struct fnic *fnic)
378{
379 unsigned long flags;
380
381 spin_lock_irqsave(&fnic->fnic_lock, flags);
382 if (fnic->stop_rx_link_events) {
383 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
384 return;
385 }
386 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
387
388 queue_work(fnic_event_queue, &fnic->link_work);
389
390}
391
392static int fnic_notify_set(struct fnic *fnic)
393{
394 int err;
395
396 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
397 case VNIC_DEV_INTR_MODE_INTX:
398 err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
399 break;
400 case VNIC_DEV_INTR_MODE_MSI:
401 err = vnic_dev_notify_set(fnic->vdev, -1);
402 break;
403 case VNIC_DEV_INTR_MODE_MSIX:
404 err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
405 break;
406 default:
407 shost_printk(KERN_ERR, fnic->lport->host,
408 "Interrupt mode should be set up"
409 " before devcmd notify set %d\n",
410 vnic_dev_get_intr_mode(fnic->vdev));
411 err = -1;
412 break;
413 }
414
415 return err;
416}
417
/*
 * Periodic timer callback armed only in MSI mode (see
 * fnic_notify_timer_start()): polls for link events and re-arms
 * itself every FNIC_NOTIFY_TIMER_PERIOD.
 */
static void fnic_notify_timer(struct timer_list *t)
{
	struct fnic *fnic = from_timer(fnic, t, notify_timer);

	fnic_handle_link_event(fnic);
	mod_timer(&fnic->notify_timer,
		  round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
426
/* FIP timer callback: defers to the FIP timer handler.  One-shot --
 * unlike fnic_notify_timer() it does not re-arm itself here. */
static void fnic_fip_notify_timer(struct timer_list *t)
{
	struct fnic *fnic = from_timer(fnic, t, fip_timer);

	fnic_handle_fip_timer(fnic);
}
433
434static void fnic_notify_timer_start(struct fnic *fnic)
435{
436 switch (vnic_dev_get_intr_mode(fnic->vdev)) {
437 case VNIC_DEV_INTR_MODE_MSI:
438
439
440
441
442 mod_timer(&fnic->notify_timer, jiffies);
443 break;
444 default:
445
446 break;
447 };
448}
449
450static int fnic_dev_wait(struct vnic_dev *vdev,
451 int (*start)(struct vnic_dev *, int),
452 int (*finished)(struct vnic_dev *, int *),
453 int arg)
454{
455 unsigned long time;
456 int done;
457 int err;
458 int count;
459
460 count = 0;
461
462 err = start(vdev, arg);
463 if (err)
464 return err;
465
466
467
468
469
470
471
472 time = jiffies + (HZ * 2);
473 do {
474 err = finished(vdev, &done);
475 count++;
476 if (err)
477 return err;
478 if (done)
479 return 0;
480 schedule_timeout_uninterruptible(HZ / 10);
481 } while (time_after(time, jiffies) || (count < 3));
482
483 return -ETIMEDOUT;
484}
485
/*
 * fnic_cleanup() - quiesce and drain all hardware queues prior to
 * device teardown (called from fnic_remove()).
 *
 * Order matters: disable the vNIC and mask interrupts, disable every
 * RQ/WQ/copy-WQ, flush posted completions, clean residual buffers off
 * the queues/CQs/interrupts, then destroy the per-instance mempools.
 *
 * NOTE(review): a vnic_*_disable() failure returns early and leaves
 * the rest untouched -- confirm the caller tolerates this partial
 * teardown.
 */
static int fnic_cleanup(struct fnic *fnic)
{
	unsigned int i;
	int err;

	vnic_dev_disable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_mask(&fnic->intr[i]);

	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_disable(&fnic->rq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_disable(&fnic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
		if (err)
			return err;
	}

	/* Flush whatever completions the firmware already posted. */
	fnic_wq_copy_cmpl_handler(fnic, io_completions);
	fnic_wq_cmpl_handler(fnic, -1);
	fnic_rq_cmpl_handler(fnic, -1);

	/* Reclaim buffers still sitting on the (now disabled) queues. */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_clean(&fnic->wq_copy[i],
				   fnic_wq_copy_cleanup_handler);

	for (i = 0; i < fnic->cq_count; i++)
		vnic_cq_clean(&fnic->cq[i]);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_clean(&fnic->intr[i]);

	mempool_destroy(fnic->io_req_pool);
	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
		mempool_destroy(fnic->io_sgl_pool[i]);

	return 0;
}
536
/* Unmap BAR0 if it was mapped (probe may fail before pci_iomap()). */
static void fnic_iounmap(struct fnic *fnic)
{
	if (fnic->bar0.vaddr)
		iounmap(fnic->bar0.vaddr);
}
542
543
544
545
546
547static u8 *fnic_get_mac(struct fc_lport *lport)
548{
549 struct fnic *fnic = lport_priv(lport);
550
551 return fnic->data_src_addr;
552}
553
554static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
555{
556 u16 old_vlan;
557 old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
558}
559
/*
 * fnic_probe() - PCI probe: bring up one fnic adapter instance.
 *
 * Allocates the libfc lport + SCSI host, maps BAR0, registers/opens
 * the vNIC device, sizes the host from the vNIC config, allocates
 * queues and mempools, wires up the FIP/FCoE controller, registers
 * the SCSI host and starts fabric login.  On any failure the goto
 * chain unwinds strictly in reverse order of acquisition.
 */
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);
	fnic->lport = lp;
	fnic->ctlr.lp = lp;

	fnic->link_events = 0;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	fnic_stats_debugfs_init(fnic);

	/* Set up PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/*
	 * Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 64-bit first, fall back
	 * to 32-bit.
	 */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = vnic_dev_cmd_init(fnic->vdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vnic_dev_cmd_init() returns %d, aborting\n",
			     err);
		goto err_out_vnic_unregister;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_dev_cmd_deinit;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed \n");
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	/* Size the host queue from the configured throttle count,
	 * clamped to [FNIC_MIN_IO_REQ, FNIC_MAX_IO_REQ]. */
	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
					max_t(u32, FNIC_MIN_IO_REQ,
					fnic->config.io_throttle_count));
	}
	fnic->fnic_max_tag_id = host->can_queue;

	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FCOE_MAX_CMD_LEN;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_clear_intr;
	}

	/* Initialize all the driver locks and per-queue bookkeeping. */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	/* Set up the FCoE controller; FIP mode depends on firmware caps. */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
		fnic->set_vlan = fnic_set_vlan;
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
		timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
		spin_lock_init(&fnic->vlans_lock);
		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
		INIT_WORK(&fnic->event_work, fnic_handle_event);
		skb_queue_head_init(&fnic->fip_frame_queue);
		INIT_LIST_HEAD(&fnic->evlist);
		INIT_LIST_HEAD(&fnic->vlans);
	} else {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
		fnic->ctlr.state = FIP_ST_NON_FIP;
	}
	fnic->state = FNIC_IN_FC_MODE;

	atomic_set(&fnic->in_flight, 0);
	fnic->state_flags = FNIC_FLAGS_NONE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* MSI mode polls for link events via a timer (see
	 * fnic_notify_timer_start()). */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);

	/* Enable and fill the receive queues before going live. */
	for (i = 0; i < fnic->rq_count; i++) {
		vnic_rq_enable(&fnic->rq[i]);
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_init_stats(lp);
	fnic->stats_reset_time = jiffies;

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;
	fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_free_exch_mgr;
	}

	vnic_dev_enable(fnic->vdev);

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

/* Error unwind: strictly reverse order of acquisition above. */
err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_dev_cmd_deinit:
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	fnic_stats_debugfs_remove(fnic);
	scsi_host_put(lp->host);
err_out:
	return err;
}
945
/*
 * fnic_remove() - PCI remove: tear down one adapter instance in the
 * reverse order of fnic_probe().
 *
 * First stops new work from arriving (link events, timers, queued
 * frames), then logs off the fabric, destroys the FCoE/libfc state,
 * drains the hardware via fnic_cleanup(), and finally releases the
 * SCSI host and PCI resources.
 */
static void fnic_remove(struct pci_dev *pdev)
{
	struct fnic *fnic = pci_get_drvdata(pdev);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events to the local port. ISR and
	 * other threads that can queue work items will also stop
	 * creating work items on the fnic workqueue
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->stop_rx_link_events = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* The notify timer exists only in MSI mode (see fnic_probe()). */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		del_timer_sync(&fnic->notify_timer);

	/*
	 * Flush the fnic event queue. After this call, there should
	 * be no event queued for this fnic device in the workqueue
	 */
	flush_workqueue(fnic_event_queue);
	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		del_timer_sync(&fnic->fip_timer);
		skb_queue_purge(&fnic->fip_frame_queue);
		fnic_fcoe_reset_vlans(fnic);
		fnic_fcoe_evlist_free(fnic);
	}

	/*
	 * Log off the fabric. This stops all remote ports, dns port,
	 * logs off the fabric. This flushes all rport, disc, lport work
	 * before returning
	 */
	fc_fabric_logoff(fnic->lport);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->in_remove = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fcoe_ctlr_destroy(&fnic->ctlr);
	fc_lport_destroy(lp);
	fnic_stats_debugfs_remove(fnic);

	/*
	 * This stops the fnic device, masks all interrupts. Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleaned up
	 */
	fnic_cleanup(fnic);

	BUG_ON(!skb_queue_empty(&fnic->frame_queue));
	BUG_ON(!skb_queue_empty(&fnic->tx_queue));

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_del(&fnic->list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
	fc_exch_mgr_free(fnic->lport);
	vnic_dev_notify_unset(fnic->vdev);
	fnic_free_intr(fnic);
	fnic_free_vnic_resources(fnic);
	fnic_clear_intr_mode(fnic);
	vnic_dev_close(fnic->vdev);
	vnic_dev_unregister(fnic->vdev);
	fnic_iounmap(fnic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	scsi_host_put(lp->host);
}
1023
/* PCI driver glue registered in fnic_init_module(). */
static struct pci_driver fnic_driver = {
	.name = DRV_NAME,
	.id_table = fnic_id_table,
	.probe = fnic_probe,
	.remove = fnic_remove,
};
1030
/*
 * fnic_init_module() - module load: set up debugfs/trace facilities,
 * slab caches, workqueues and the FC transport, then register the PCI
 * driver.  Debugfs/trace failures are deliberately non-fatal; only
 * the diagnostics are disabled.
 */
static int __init fnic_init_module(void)
{
	size_t len;
	int err = 0;

	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	/* Create debugfs entries for fnic (non-fatal on failure) */
	err = fnic_debugfs_init();
	if (err < 0) {
		printk(KERN_ERR PFX "Failed to create fnic directory "
				"for tracing and stats logging\n");
		fnic_debugfs_terminate();
	}

	/* Allocate memory for trace buffer (non-fatal on failure) */
	err = fnic_trace_buf_init();
	if (err < 0) {
		printk(KERN_ERR PFX
		       "Trace buffer initialization Failed. "
		       "Fnic Tracing utility is disabled\n");
		fnic_trace_free();
	}

	/* Allocate memory for fc trace buffer (non-fatal on failure) */
	err = fnic_fc_trace_init();
	if (err < 0) {
		printk(KERN_ERR PFX "FC trace buffer initialization Failed "
		       "FC frame tracing utility is disabled\n");
		fnic_fc_trace_free();
	}

	/* Create default-size SG list cache; the extra
	 * FNIC_SG_DESC_ALIGN bytes allow aligning the table. */
	len = sizeof(struct fnic_dflt_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
		("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
		printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_dflt;
	}

	/* Create max-size SG list cache */
	len = sizeof(struct fnic_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
		("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_max;
	}

	/* Create fnic io_req slab */
	fnic_io_req_cache = kmem_cache_create("fnic_io_req",
					      sizeof(struct fnic_io_req),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_io_req_cache) {
		printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
		err = -ENOMEM;
		goto err_create_fnic_ioreq_slab;
	}

	fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
	if (!fnic_event_queue) {
		printk(KERN_ERR PFX "fnic work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fnic_workq;
	}

	/* NOTE(review): redundant -- fnic_list/fnic_list_lock are
	 * already statically initialized at the top of this file. */
	spin_lock_init(&fnic_list_lock);
	INIT_LIST_HEAD(&fnic_list);

	fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
	if (!fnic_fip_queue) {
		printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fip_workq;
	}

	fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
	if (!fnic_fc_transport) {
		printk(KERN_ERR PFX "fc_attach_transport error\n");
		err = -ENOMEM;
		goto err_fc_transport;
	}

	/* register the driver with PCI system */
	err = pci_register_driver(&fnic_driver);
	if (err < 0) {
		printk(KERN_ERR PFX "pci register error\n");
		goto err_pci_register;
	}
	return err;

err_pci_register:
	fc_release_transport(fnic_fc_transport);
err_fc_transport:
	destroy_workqueue(fnic_fip_queue);
err_create_fip_workq:
	destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
	kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
	fnic_trace_free();
	fnic_fc_trace_free();
	fnic_debugfs_terminate();
	return err;
}
1147
/*
 * fnic_cleanup_module() - module unload: tear everything down in
 * (roughly) reverse order of fnic_init_module().
 */
static void __exit fnic_cleanup_module(void)
{
	pci_unregister_driver(&fnic_driver);
	destroy_workqueue(fnic_event_queue);
	if (fnic_fip_queue) {
		flush_workqueue(fnic_fip_queue);
		destroy_workqueue(fnic_fip_queue);
	}
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	kmem_cache_destroy(fnic_io_req_cache);
	fc_release_transport(fnic_fc_transport);
	fnic_trace_free();
	fnic_fc_trace_free();
	fnic_debugfs_terminate();
}
1164
/* Module entry/exit points. */
module_init(fnic_init_module);
module_exit(fnic_cleanup_module);
1167
1168