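/*
 *  FiberChannel transport specific attributes exported to sysfs.
 *
 *  This file implements the FC transport class: the attribute
 *  containers, sysfs attributes and helper routines shared by FC
 *  hosts, remote ports, virtual (NPIV) ports and SCSI targets.
 */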
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_cmnd.h>
#include <net/netlink.h>
#include <scsi/scsi_netlink_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "scsi_priv.h"
#include "scsi_transport_fc_internal.h"

static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
static void fc_vport_sched_delete(struct work_struct *work);
static int fc_vport_setup(struct Scsi_Host *shost, int channel,
	struct device *pdev, struct fc_vport_identifiers *ids,
	struct fc_vport **vport);
static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
static void fc_bsg_remove(struct request_queue *);
static void fc_bsg_goose_queue(struct fc_rport *);
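/*
 * Module Parameters
 */

/*
 * dev_loss_tmo: the default number of seconds that the FC transport
 *   should insulate the loss of a remote port before removing the
 *   SCSI target. May be overridden per remote port via sysfs.
 */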
static unsigned int fc_dev_loss_tmo = 60;

module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the FC transport should"
		 " insulate the loss of a remote port. Once this value is"
		 " exceeded, the scsi target is removed. Value should be"
		 " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
		 " fast_io_fail_tmo is not set.");
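/*
 * Redefine so that we can have same named attributes in the
 * vport and rport classes.
 */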
77#define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \
78struct device_attribute device_attr_##_prefix##_##_name = \
79 __ATTR(_name,_mode,_show,_store)
80
81#define fc_enum_name_search(title, table_type, table) \
82static const char *get_fc_##title##_name(enum table_type table_key) \
83{ \
84 int i; \
85 char *name = NULL; \
86 \
87 for (i = 0; i < ARRAY_SIZE(table); i++) { \
88 if (table[i].value == table_key) { \
89 name = table[i].name; \
90 break; \
91 } \
92 } \
93 return name; \
94}
95
96#define fc_enum_name_match(title, table_type, table) \
97static int get_fc_##title##_match(const char *table_key, \
98 enum table_type *value) \
99{ \
100 int i; \
101 \
102 for (i = 0; i < ARRAY_SIZE(table); i++) { \
103 if (strncmp(table_key, table[i].name, \
104 table[i].matchlen) == 0) { \
105 *value = table[i].value; \
106 return 0; \
107 } \
108 } \
109 return 1; \
110}
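/* Convert fc_port_type values to ascii string name */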
114static struct {
115 enum fc_port_type value;
116 char *name;
117} fc_port_type_names[] = {
118 { FC_PORTTYPE_UNKNOWN, "Unknown" },
119 { FC_PORTTYPE_OTHER, "Other" },
120 { FC_PORTTYPE_NOTPRESENT, "Not Present" },
121 { FC_PORTTYPE_NPORT, "NPort (fabric via point-to-point)" },
122 { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" },
123 { FC_PORTTYPE_LPORT, "LPort (private loop)" },
124 { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection)" },
125 { FC_PORTTYPE_NPIV, "NPIV VPORT" },
126};
127fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
128#define FC_PORTTYPE_MAX_NAMELEN 50
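/* Reuse fc_port_type enum function for vport_type */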
131#define get_fc_vport_type_name get_fc_port_type_name
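/* Convert fc_host_event_code values to ascii string name */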
135static const struct {
136 enum fc_host_event_code value;
137 char *name;
138} fc_host_event_code_names[] = {
139 { FCH_EVT_LIP, "lip" },
140 { FCH_EVT_LINKUP, "link_up" },
141 { FCH_EVT_LINKDOWN, "link_down" },
142 { FCH_EVT_LIPRESET, "lip_reset" },
143 { FCH_EVT_RSCN, "rscn" },
144 { FCH_EVT_ADAPTER_CHANGE, "adapter_chg" },
145 { FCH_EVT_PORT_UNKNOWN, "port_unknown" },
146 { FCH_EVT_PORT_ONLINE, "port_online" },
147 { FCH_EVT_PORT_OFFLINE, "port_offline" },
148 { FCH_EVT_PORT_FABRIC, "port_fabric" },
149 { FCH_EVT_LINK_UNKNOWN, "link_unknown" },
150 { FCH_EVT_VENDOR_UNIQUE, "vendor_unique" },
151};
152fc_enum_name_search(host_event_code, fc_host_event_code,
153 fc_host_event_code_names)
154#define FC_HOST_EVENT_CODE_MAX_NAMELEN 30
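/* Convert fc_port_state values to ascii string name */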
158static struct {
159 enum fc_port_state value;
160 char *name;
161} fc_port_state_names[] = {
162 { FC_PORTSTATE_UNKNOWN, "Unknown" },
163 { FC_PORTSTATE_NOTPRESENT, "Not Present" },
164 { FC_PORTSTATE_ONLINE, "Online" },
165 { FC_PORTSTATE_OFFLINE, "Offline" },
166 { FC_PORTSTATE_BLOCKED, "Blocked" },
167 { FC_PORTSTATE_BYPASSED, "Bypassed" },
168 { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics" },
169 { FC_PORTSTATE_LINKDOWN, "Linkdown" },
170 { FC_PORTSTATE_ERROR, "Error" },
171 { FC_PORTSTATE_LOOPBACK, "Loopback" },
172 { FC_PORTSTATE_DELETED, "Deleted" },
173};
174fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
175#define FC_PORTSTATE_MAX_NAMELEN 20
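/* Convert fc_vport_state values to ascii string name */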
179static struct {
180 enum fc_vport_state value;
181 char *name;
182} fc_vport_state_names[] = {
183 { FC_VPORT_UNKNOWN, "Unknown" },
184 { FC_VPORT_ACTIVE, "Active" },
185 { FC_VPORT_DISABLED, "Disabled" },
186 { FC_VPORT_LINKDOWN, "Linkdown" },
187 { FC_VPORT_INITIALIZING, "Initializing" },
188 { FC_VPORT_NO_FABRIC_SUPP, "No Fabric Support" },
189 { FC_VPORT_NO_FABRIC_RSCS, "No Fabric Resources" },
190 { FC_VPORT_FABRIC_LOGOUT, "Fabric Logout" },
191 { FC_VPORT_FABRIC_REJ_WWN, "Fabric Rejected WWN" },
192 { FC_VPORT_FAILED, "VPort Failed" },
193};
194fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
195#define FC_VPORTSTATE_MAX_NAMELEN 24
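/* Reuse fc_vport_state enum function for vport_last_state */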
198#define get_fc_vport_last_state_name get_fc_vport_state_name
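/* Convert fc_tgtid_binding_type values to ascii string name */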
202static const struct {
203 enum fc_tgtid_binding_type value;
204 char *name;
205 int matchlen;
206} fc_tgtid_binding_type_names[] = {
207 { FC_TGTID_BIND_NONE, "none", 4 },
208 { FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 },
209 { FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 },
210 { FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 },
211};
212fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type,
213 fc_tgtid_binding_type_names)
214fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type,
215 fc_tgtid_binding_type_names)
216#define FC_BINDTYPE_MAX_NAMELEN 30
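/*
 * Define a show function that translates a bitfield value into a
 * comma separated list of names.
 */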
219#define fc_bitfield_name_search(title, table) \
220static ssize_t \
221get_fc_##title##_names(u32 table_key, char *buf) \
222{ \
223 char *prefix = ""; \
224 ssize_t len = 0; \
225 int i; \
226 \
227 for (i = 0; i < ARRAY_SIZE(table); i++) { \
228 if (table[i].value & table_key) { \
229 len += sprintf(buf + len, "%s%s", \
230 prefix, table[i].name); \
231 prefix = ", "; \
232 } \
233 } \
234 len += sprintf(buf + len, "\n"); \
235 return len; \
236}
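/* Convert FC_COS bit values to ascii string name */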
240static const struct {
241 u32 value;
242 char *name;
243} fc_cos_names[] = {
244 { FC_COS_CLASS1, "Class 1" },
245 { FC_COS_CLASS2, "Class 2" },
246 { FC_COS_CLASS3, "Class 3" },
247 { FC_COS_CLASS4, "Class 4" },
248 { FC_COS_CLASS6, "Class 6" },
249};
250fc_bitfield_name_search(cos, fc_cos_names)
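/* Convert FC_PORTSPEED bit values to ascii string name */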
254static const struct {
255 u32 value;
256 char *name;
257} fc_port_speed_names[] = {
258 { FC_PORTSPEED_1GBIT, "1 Gbit" },
259 { FC_PORTSPEED_2GBIT, "2 Gbit" },
260 { FC_PORTSPEED_4GBIT, "4 Gbit" },
261 { FC_PORTSPEED_10GBIT, "10 Gbit" },
262 { FC_PORTSPEED_8GBIT, "8 Gbit" },
263 { FC_PORTSPEED_16GBIT, "16 Gbit" },
264 { FC_PORTSPEED_32GBIT, "32 Gbit" },
265 { FC_PORTSPEED_20GBIT, "20 Gbit" },
266 { FC_PORTSPEED_40GBIT, "40 Gbit" },
267 { FC_PORTSPEED_50GBIT, "50 Gbit" },
268 { FC_PORTSPEED_100GBIT, "100 Gbit" },
269 { FC_PORTSPEED_25GBIT, "25 Gbit" },
270 { FC_PORTSPEED_64GBIT, "64 Gbit" },
271 { FC_PORTSPEED_128GBIT, "128 Gbit" },
272 { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
273};
274fc_bitfield_name_search(port_speed, fc_port_speed_names)
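/* Emit an FC-4 type list as space-separated hex bytes */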
277static int
278show_fc_fc4s (char *buf, u8 *fc4_list)
279{
280 int i, len=0;
281
282 for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++)
283 len += sprintf(buf + len , "0x%02x ", *fc4_list);
284 len += sprintf(buf + len, "\n");
285 return len;
286}
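/* Convert FC_PORT_ROLE bit values to ascii string name */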
290static const struct {
291 u32 value;
292 char *name;
293} fc_port_role_names[] = {
294 { FC_PORT_ROLE_FCP_TARGET, "FCP Target" },
295 { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
296 { FC_PORT_ROLE_IP_PORT, "IP Port" },
297 { FC_PORT_ROLE_FCP_DUMMY_INITIATOR, "FCP Dummy Initiator" },
298};
299fc_bitfield_name_search(port_roles, fc_port_role_names)
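/*
 * Well-known fabric addresses: the upper bits of a well-known port_id
 * match FC_WELLKNOWN_PORTID_MASK and the low nibble identifies the
 * fabric entity (F_Port, fabric controller, directory/time/management
 * server).
 */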
304#define FC_WELLKNOWN_PORTID_MASK 0xfffff0
305#define FC_WELLKNOWN_ROLE_MASK 0x00000f
306#define FC_FPORT_PORTID 0x00000e
307#define FC_FABCTLR_PORTID 0x00000d
308#define FC_DIRSRVR_PORTID 0x00000c
309#define FC_TIMESRVR_PORTID 0x00000b
310#define FC_MGMTSRVR_PORTID 0x00000a
311
312
313static void fc_timeout_deleted_rport(struct work_struct *work);
314static void fc_timeout_fail_rport_io(struct work_struct *work);
315static void fc_scsi_scan_rport(struct work_struct *work);
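/* Attribute counts per transport object type; bump when adding attributes */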
321#define FC_STARGET_NUM_ATTRS 3
322#define FC_RPORT_NUM_ATTRS 10
323#define FC_VPORT_NUM_ATTRS 9
324#define FC_HOST_NUM_ATTRS 29
325
326struct fc_internal {
327 struct scsi_transport_template t;
328 struct fc_function_template *f;
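 /*
  * For each object type, keep the private attribute structures plus a
  * NULL-terminated array of pointers to them for registration with the
  * midlayer. The starget and host attribute containers are provided by
  * the midlayer; the rport and vport containers belong to the FC
  * transport and are therefore declared here.
  */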
340 struct device_attribute private_starget_attrs[
341 FC_STARGET_NUM_ATTRS];
342 struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1];
343
344 struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS];
345 struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1];
346
347 struct transport_container rport_attr_cont;
348 struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
349 struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
350
351 struct transport_container vport_attr_cont;
352 struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
353 struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
354};
355
356#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
357
358static int fc_target_setup(struct transport_container *tc, struct device *dev,
359 struct device *cdev)
360{
361 struct scsi_target *starget = to_scsi_target(dev);
362 struct fc_rport *rport = starget_to_rport(starget);
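 /*
  * If the parent is a remote port, copy the port's values. Otherwise
  * this host uses the fc_transport without the remote port interface,
  * so initialize to known non-values.
  */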
369 if (rport) {
370 fc_starget_node_name(starget) = rport->node_name;
371 fc_starget_port_name(starget) = rport->port_name;
372 fc_starget_port_id(starget) = rport->port_id;
373 } else {
374 fc_starget_node_name(starget) = -1;
375 fc_starget_port_name(starget) = -1;
376 fc_starget_port_id(starget) = -1;
377 }
378
379 return 0;
380}
381
382static DECLARE_TRANSPORT_CLASS(fc_transport_class,
383 "fc_transport",
384 fc_target_setup,
385 NULL,
386 NULL);
387
388static int fc_host_setup(struct transport_container *tc, struct device *dev,
389 struct device *cdev)
390{
391 struct Scsi_Host *shost = dev_to_shost(dev);
392 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
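 /*
  * Set default values easily detected by the midlayer as failure
  * cases. The LLDD is responsible for initializing all transport
  * attributes to valid values per host.
  */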
399 fc_host->node_name = -1;
400 fc_host->port_name = -1;
401 fc_host->permanent_port_name = -1;
402 fc_host->supported_classes = FC_COS_UNSPECIFIED;
403 memset(fc_host->supported_fc4s, 0,
404 sizeof(fc_host->supported_fc4s));
405 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
406 fc_host->maxframe_size = -1;
407 fc_host->max_npiv_vports = 0;
408 memset(fc_host->serial_number, 0,
409 sizeof(fc_host->serial_number));
410 memset(fc_host->manufacturer, 0,
411 sizeof(fc_host->manufacturer));
412 memset(fc_host->model, 0,
413 sizeof(fc_host->model));
414 memset(fc_host->model_description, 0,
415 sizeof(fc_host->model_description));
416 memset(fc_host->hardware_version, 0,
417 sizeof(fc_host->hardware_version));
418 memset(fc_host->driver_version, 0,
419 sizeof(fc_host->driver_version));
420 memset(fc_host->firmware_version, 0,
421 sizeof(fc_host->firmware_version));
422 memset(fc_host->optionrom_version, 0,
423 sizeof(fc_host->optionrom_version));
424
425 fc_host->port_id = -1;
426 fc_host->port_type = FC_PORTTYPE_UNKNOWN;
427 fc_host->port_state = FC_PORTSTATE_UNKNOWN;
428 memset(fc_host->active_fc4s, 0,
429 sizeof(fc_host->active_fc4s));
430 fc_host->speed = FC_PORTSPEED_UNKNOWN;
431 fc_host->fabric_name = -1;
432 memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
433 memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
434
435 fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
436
437 INIT_LIST_HEAD(&fc_host->rports);
438 INIT_LIST_HEAD(&fc_host->rport_bindings);
439 INIT_LIST_HEAD(&fc_host->vports);
440 fc_host->next_rport_number = 0;
441 fc_host->next_target_id = 0;
442 fc_host->next_vport_number = 0;
443 fc_host->npiv_vports_inuse = 0;
444
445 snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
446 "fc_wq_%d", shost->host_no);
447 fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name);
448 if (!fc_host->work_q)
449 return -ENOMEM;
450
451 fc_host->dev_loss_tmo = fc_dev_loss_tmo;
452 snprintf(fc_host->devloss_work_q_name,
453 sizeof(fc_host->devloss_work_q_name),
454 "fc_dl_%d", shost->host_no);
455 fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0,
456 fc_host->devloss_work_q_name);
457 if (!fc_host->devloss_work_q) {
458 destroy_workqueue(fc_host->work_q);
459 fc_host->work_q = NULL;
460 return -ENOMEM;
461 }
462
463 fc_bsg_hostadd(shost, fc_host);
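 /* ignore any bsg add error - we just can't do sgio */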
466 return 0;
467}
468
469static int fc_host_remove(struct transport_container *tc, struct device *dev,
470 struct device *cdev)
471{
472 struct Scsi_Host *shost = dev_to_shost(dev);
473 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
474
475 fc_bsg_remove(fc_host->rqst_q);
476 return 0;
477}
478
479static DECLARE_TRANSPORT_CLASS(fc_host_class,
480 "fc_host",
481 fc_host_setup,
482 fc_host_remove,
483 NULL);
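/*
 * Setup and Remove actions for remote ports are handled by the
 * remote port create/delete functions below.
 */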
489static DECLARE_TRANSPORT_CLASS(fc_rport_class,
490 "fc_remote_ports",
491 NULL,
492 NULL,
493 NULL);
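/*
 * Setup and Remove actions for virtual ports are handled by the
 * vport create/delete functions below.
 */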
499static DECLARE_TRANSPORT_CLASS(fc_vport_class,
500 "fc_vports",
501 NULL,
502 NULL,
503 NULL);
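/*
 * Netlink Infrastructure
 */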
509static atomic_t fc_event_seq;
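/**
 * fc_get_event_number - Obtain the next sequential FC event number
 *
 * Notes:
 *   An atomic counter is used so no lock is required.
 */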
519u32
520fc_get_event_number(void)
521{
522 return atomic_add_return(1, &fc_event_seq);
523}
524EXPORT_SYMBOL(fc_get_event_number);
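/**
 * fc_host_post_event - post an FC event on an fc_host
 * @shost:		host the event occurred on
 * @event_number:	fc event number obtained from fc_get_event_number()
 * @event_code:		fc_host event being posted
 * @event_data:		32 bits of data for the event being posted
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */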
537void
538fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
539 enum fc_host_event_code event_code, u32 event_data)
540{
541 struct sk_buff *skb;
542 struct nlmsghdr *nlh;
543 struct fc_nl_event *event;
544 const char *name;
545 u32 len;
546 int err;
547
548 if (!scsi_nl_sock) {
549 err = -ENOENT;
550 goto send_fail;
551 }
552
553 len = FC_NL_MSGALIGN(sizeof(*event));
554
555 skb = nlmsg_new(len, GFP_KERNEL);
556 if (!skb) {
557 err = -ENOBUFS;
558 goto send_fail;
559 }
560
561 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
562 if (!nlh) {
563 err = -ENOBUFS;
564 goto send_fail_skb;
565 }
566 event = nlmsg_data(nlh);
567
568 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
569 FC_NL_ASYNC_EVENT, len);
570 event->seconds = get_seconds();
571 event->vendor_id = 0;
572 event->host_no = shost->host_no;
573 event->event_datalen = sizeof(u32);
574 event->event_num = event_number;
575 event->event_code = event_code;
576 event->event_data = event_data;
577
578 nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
579 GFP_KERNEL);
580 return;
581
582send_fail_skb:
583 kfree_skb(skb);
584send_fail:
585 name = get_fc_host_event_code_name(event_code);
586 printk(KERN_WARNING
587 "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
588 __func__, shost->host_no,
589 (name) ? name : "<unknown>", event_data, err);
590 return;
591}
592EXPORT_SYMBOL(fc_host_post_event);
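/**
 * fc_host_post_vendor_event - post a vendor unique event on an fc_host
 * @shost:		host the event occurred on
 * @event_number:	fc event number obtained from fc_get_event_number()
 * @data_len:		amount, in bytes, of vendor unique data
 * @data_buf:		pointer to the vendor unique data
 * @vendor_id:		vendor id
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */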
606void
607fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
608 u32 data_len, char * data_buf, u64 vendor_id)
609{
610 struct sk_buff *skb;
611 struct nlmsghdr *nlh;
612 struct fc_nl_event *event;
613 u32 len;
614 int err;
615
616 if (!scsi_nl_sock) {
617 err = -ENOENT;
618 goto send_vendor_fail;
619 }
620
621 len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
622
623 skb = nlmsg_new(len, GFP_KERNEL);
624 if (!skb) {
625 err = -ENOBUFS;
626 goto send_vendor_fail;
627 }
628
629 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
630 if (!nlh) {
631 err = -ENOBUFS;
632 goto send_vendor_fail_skb;
633 }
634 event = nlmsg_data(nlh);
635
636 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
637 FC_NL_ASYNC_EVENT, len);
638 event->seconds = get_seconds();
639 event->vendor_id = vendor_id;
640 event->host_no = shost->host_no;
641 event->event_datalen = data_len;
642 event->event_num = event_number;
643 event->event_code = FCH_EVT_VENDOR_UNIQUE;
644 memcpy(&event->event_data, data_buf, data_len);
645
646 nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
647 GFP_KERNEL);
648 return;
649
650send_vendor_fail_skb:
651 kfree_skb(skb);
652send_vendor_fail:
653 printk(KERN_WARNING
654 "%s: Dropped Event : host %d vendor_unique - err %d\n",
655 __func__, shost->host_no, err);
656 return;
657}
658EXPORT_SYMBOL(fc_host_post_vendor_event);
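/*
 * Module init/exit: register and unregister the transport, host,
 * rport and vport classes.
 */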
662static __init int fc_transport_init(void)
663{
664 int error;
665
666 atomic_set(&fc_event_seq, 0);
667
668 error = transport_class_register(&fc_host_class);
669 if (error)
670 return error;
671 error = transport_class_register(&fc_vport_class);
672 if (error)
673 goto unreg_host_class;
674 error = transport_class_register(&fc_rport_class);
675 if (error)
676 goto unreg_vport_class;
677 error = transport_class_register(&fc_transport_class);
678 if (error)
679 goto unreg_rport_class;
680 return 0;
681
682unreg_rport_class:
683 transport_class_unregister(&fc_rport_class);
684unreg_vport_class:
685 transport_class_unregister(&fc_vport_class);
686unreg_host_class:
687 transport_class_unregister(&fc_host_class);
688 return error;
689}
690
691static void __exit fc_transport_exit(void)
692{
693 transport_class_unregister(&fc_transport_class);
694 transport_class_unregister(&fc_rport_class);
695 transport_class_unregister(&fc_host_class);
696 transport_class_unregister(&fc_vport_class);
697}
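/*
 * Remote Port Attribute Management
 */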
703#define fc_rport_show_function(field, format_string, sz, cast) \
704static ssize_t \
705show_fc_rport_##field (struct device *dev, \
706 struct device_attribute *attr, char *buf) \
707{ \
708 struct fc_rport *rport = transport_class_to_rport(dev); \
709 struct Scsi_Host *shost = rport_to_shost(rport); \
710 struct fc_internal *i = to_fc_internal(shost->transportt); \
711 if ((i->f->get_rport_##field) && \
712 !((rport->port_state == FC_PORTSTATE_BLOCKED) || \
713 (rport->port_state == FC_PORTSTATE_DELETED) || \
714 (rport->port_state == FC_PORTSTATE_NOTPRESENT))) \
715 i->f->get_rport_##field(rport); \
716 return snprintf(buf, sz, format_string, cast rport->field); \
717}
718
719#define fc_rport_store_function(field) \
720static ssize_t \
721store_fc_rport_##field(struct device *dev, \
722 struct device_attribute *attr, \
723 const char *buf, size_t count) \
724{ \
725 int val; \
726 struct fc_rport *rport = transport_class_to_rport(dev); \
727 struct Scsi_Host *shost = rport_to_shost(rport); \
728 struct fc_internal *i = to_fc_internal(shost->transportt); \
729 char *cp; \
730 if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \
731 (rport->port_state == FC_PORTSTATE_DELETED) || \
732 (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \
733 return -EBUSY; \
734 val = simple_strtoul(buf, &cp, 0); \
735 if (*cp && (*cp != '\n')) \
736 return -EINVAL; \
737 i->f->set_rport_##field(rport, val); \
738 return count; \
739}
740
741#define fc_rport_rd_attr(field, format_string, sz) \
742 fc_rport_show_function(field, format_string, sz, ) \
743static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
744 show_fc_rport_##field, NULL)
745
746#define fc_rport_rd_attr_cast(field, format_string, sz, cast) \
747 fc_rport_show_function(field, format_string, sz, (cast)) \
748static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
749 show_fc_rport_##field, NULL)
750
751#define fc_rport_rw_attr(field, format_string, sz) \
752 fc_rport_show_function(field, format_string, sz, ) \
753 fc_rport_store_function(field) \
754static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR, \
755 show_fc_rport_##field, \
756 store_fc_rport_##field)
757
758
759#define fc_private_rport_show_function(field, format_string, sz, cast) \
760static ssize_t \
761show_fc_rport_##field (struct device *dev, \
762 struct device_attribute *attr, char *buf) \
763{ \
764 struct fc_rport *rport = transport_class_to_rport(dev); \
765 return snprintf(buf, sz, format_string, cast rport->field); \
766}
767
768#define fc_private_rport_rd_attr(field, format_string, sz) \
769 fc_private_rport_show_function(field, format_string, sz, ) \
770static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
771 show_fc_rport_##field, NULL)
772
773#define fc_private_rport_rd_attr_cast(field, format_string, sz, cast) \
774 fc_private_rport_show_function(field, format_string, sz, (cast)) \
775static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
776 show_fc_rport_##field, NULL)
777
778
779#define fc_private_rport_rd_enum_attr(title, maxlen) \
780static ssize_t \
781show_fc_rport_##title (struct device *dev, \
782 struct device_attribute *attr, char *buf) \
783{ \
784 struct fc_rport *rport = transport_class_to_rport(dev); \
785 const char *name; \
786 name = get_fc_##title##_name(rport->title); \
787 if (!name) \
788 return -EINVAL; \
789 return snprintf(buf, maxlen, "%s\n", name); \
790} \
791static FC_DEVICE_ATTR(rport, title, S_IRUGO, \
792 show_fc_rport_##title, NULL)
793
794
795#define SETUP_RPORT_ATTRIBUTE_RD(field) \
796 i->private_rport_attrs[count] = device_attr_rport_##field; \
797 i->private_rport_attrs[count].attr.mode = S_IRUGO; \
798 i->private_rport_attrs[count].store = NULL; \
799 i->rport_attrs[count] = &i->private_rport_attrs[count]; \
800 if (i->f->show_rport_##field) \
801 count++
802
803#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field) \
804 i->private_rport_attrs[count] = device_attr_rport_##field; \
805 i->private_rport_attrs[count].attr.mode = S_IRUGO; \
806 i->private_rport_attrs[count].store = NULL; \
807 i->rport_attrs[count] = &i->private_rport_attrs[count]; \
808 count++
809
810#define SETUP_RPORT_ATTRIBUTE_RW(field) \
811 i->private_rport_attrs[count] = device_attr_rport_##field; \
812 if (!i->f->set_rport_##field) { \
813 i->private_rport_attrs[count].attr.mode = S_IRUGO; \
814 i->private_rport_attrs[count].store = NULL; \
815 } \
816 i->rport_attrs[count] = &i->private_rport_attrs[count]; \
817 if (i->f->show_rport_##field) \
818 count++
819
820#define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field) \
821{ \
822 i->private_rport_attrs[count] = device_attr_rport_##field; \
823 i->rport_attrs[count] = &i->private_rport_attrs[count]; \
824 count++; \
825}
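/* The FC Transport Remote Port Attributes: */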
832fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20);
833
834static ssize_t
835show_fc_rport_supported_classes (struct device *dev,
836 struct device_attribute *attr, char *buf)
837{
838 struct fc_rport *rport = transport_class_to_rport(dev);
839 if (rport->supported_classes == FC_COS_UNSPECIFIED)
840 return snprintf(buf, 20, "unspecified\n");
841 return get_fc_cos_names(rport->supported_classes, buf);
842}
843static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
844 show_fc_rport_supported_classes, NULL);
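/* Dynamic Remote Port Attributes */

/*
 * dev_loss_tmo attribute
 */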
static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
{
	char *cp;

	*val = simple_strtoul(buf, &cp, 0);
	if (*cp && (*cp != '\n'))
		return -EINVAL;

	/*
	 * Check for overflow; dev_loss_tmo is u32
	 */
	if (*val > UINT_MAX)
		return -EINVAL;

	return 0;
}

static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
				     unsigned long val)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct fc_internal *i = to_fc_internal(shost->transportt);

	if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
	    (rport->port_state == FC_PORTSTATE_DELETED) ||
	    (rport->port_state == FC_PORTSTATE_NOTPRESENT))
		return -EBUSY;

	/*
	 * Check for overflow; dev_loss_tmo is u32
	 */
	if (val > UINT_MAX)
		return -EINVAL;

	/*
	 * If fast_io_fail is off we have to cap
	 * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
	 */
	if (rport->fast_io_fail_tmo == -1 &&
	    val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;

	i->f->set_rport_dev_loss_tmo(rport, val);
	return 0;
}
894
895fc_rport_show_function(dev_loss_tmo, "%d\n", 20, )
896static ssize_t
897store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
898 const char *buf, size_t count)
899{
900 struct fc_rport *rport = transport_class_to_rport(dev);
901 unsigned long val;
902 int rc;
903
904 rc = fc_str_to_dev_loss(buf, &val);
905 if (rc)
906 return rc;
907
908 rc = fc_rport_set_dev_loss_tmo(rport, val);
909 if (rc)
910 return rc;
911 return count;
912}
913static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
914 show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo);
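/* Private Remote Port Attributes */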
919fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
920fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
921fc_private_rport_rd_attr(port_id, "0x%06x\n", 20);
922
923static ssize_t
924show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
925 char *buf)
926{
927 struct fc_rport *rport = transport_class_to_rport(dev);
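 /* identify any roles that are port_id specific */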
930 if ((rport->port_id != -1) &&
931 (rport->port_id & FC_WELLKNOWN_PORTID_MASK) ==
932 FC_WELLKNOWN_PORTID_MASK) {
933 switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) {
934 case FC_FPORT_PORTID:
935 return snprintf(buf, 30, "Fabric Port\n");
936 case FC_FABCTLR_PORTID:
937 return snprintf(buf, 30, "Fabric Controller\n");
938 case FC_DIRSRVR_PORTID:
939 return snprintf(buf, 30, "Directory Server\n");
940 case FC_TIMESRVR_PORTID:
941 return snprintf(buf, 30, "Time Server\n");
942 case FC_MGMTSRVR_PORTID:
943 return snprintf(buf, 30, "Management Server\n");
944 default:
945 return snprintf(buf, 30, "Unknown Fabric Entity\n");
946 }
947 } else {
948 if (rport->roles == FC_PORT_ROLE_UNKNOWN)
949 return snprintf(buf, 20, "unknown\n");
950 return get_fc_port_roles_names(rport->roles, buf);
951 }
952}
953static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
954 show_fc_rport_roles, NULL);
955
956fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
957fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
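/*
 * fast_io_fail_tmo attribute
 */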
962static ssize_t
963show_fc_rport_fast_io_fail_tmo (struct device *dev,
964 struct device_attribute *attr, char *buf)
965{
966 struct fc_rport *rport = transport_class_to_rport(dev);
967
968 if (rport->fast_io_fail_tmo == -1)
969 return snprintf(buf, 5, "off\n");
970 return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
971}
972
973static ssize_t
974store_fc_rport_fast_io_fail_tmo(struct device *dev,
975 struct device_attribute *attr, const char *buf,
976 size_t count)
977{
978 int val;
979 char *cp;
980 struct fc_rport *rport = transport_class_to_rport(dev);
981
982 if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
983 (rport->port_state == FC_PORTSTATE_DELETED) ||
984 (rport->port_state == FC_PORTSTATE_NOTPRESENT))
985 return -EBUSY;
986 if (strncmp(buf, "off", 3) == 0)
987 rport->fast_io_fail_tmo = -1;
988 else {
989 val = simple_strtoul(buf, &cp, 0);
990 if ((*cp && (*cp != '\n')) || (val < 0))
991 return -EINVAL;
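 /*
  * Cap fast_io_fail by dev_loss_tmo and
  * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
  */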
996 if ((val >= rport->dev_loss_tmo) ||
997 (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
998 return -EINVAL;
999
1000 rport->fast_io_fail_tmo = val;
1001 }
1002 return count;
1003}
1004static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
1005 show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
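/*
 * FC SCSI Target Attribute Management
 */

/*
 * Note: the target show macro prefers values from the remote port
 *  when one is present in the device hierarchy; the driver's
 *  get_starget_xxx() callback is only used when no rport exists.
 */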
1018#define fc_starget_show_function(field, format_string, sz, cast) \
1019static ssize_t \
1020show_fc_starget_##field (struct device *dev, \
1021 struct device_attribute *attr, char *buf) \
1022{ \
1023 struct scsi_target *starget = transport_class_to_starget(dev); \
1024 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \
1025 struct fc_internal *i = to_fc_internal(shost->transportt); \
1026 struct fc_rport *rport = starget_to_rport(starget); \
1027 if (rport) \
1028 fc_starget_##field(starget) = rport->field; \
1029 else if (i->f->get_starget_##field) \
1030 i->f->get_starget_##field(starget); \
1031 return snprintf(buf, sz, format_string, \
1032 cast fc_starget_##field(starget)); \
1033}
1034
1035#define fc_starget_rd_attr(field, format_string, sz) \
1036 fc_starget_show_function(field, format_string, sz, ) \
1037static FC_DEVICE_ATTR(starget, field, S_IRUGO, \
1038 show_fc_starget_##field, NULL)
1039
1040#define fc_starget_rd_attr_cast(field, format_string, sz, cast) \
1041 fc_starget_show_function(field, format_string, sz, (cast)) \
1042static FC_DEVICE_ATTR(starget, field, S_IRUGO, \
1043 show_fc_starget_##field, NULL)
1044
1045#define SETUP_STARGET_ATTRIBUTE_RD(field) \
1046 i->private_starget_attrs[count] = device_attr_starget_##field; \
1047 i->private_starget_attrs[count].attr.mode = S_IRUGO; \
1048 i->private_starget_attrs[count].store = NULL; \
1049 i->starget_attrs[count] = &i->private_starget_attrs[count]; \
1050 if (i->f->show_starget_##field) \
1051 count++
1052
1053#define SETUP_STARGET_ATTRIBUTE_RW(field) \
1054 i->private_starget_attrs[count] = device_attr_starget_##field; \
1055 if (!i->f->set_starget_##field) { \
1056 i->private_starget_attrs[count].attr.mode = S_IRUGO; \
1057 i->private_starget_attrs[count].store = NULL; \
1058 } \
1059 i->starget_attrs[count] = &i->private_starget_attrs[count]; \
1060 if (i->f->show_starget_##field) \
1061 count++
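/* The FC Transport SCSI Target Attributes: */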
1064fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1065fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1066fc_starget_rd_attr(port_id, "0x%06x\n", 20);
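/*
 * FC Virtual Port Attribute Management
 */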
1073#define fc_vport_show_function(field, format_string, sz, cast) \
1074static ssize_t \
1075show_fc_vport_##field (struct device *dev, \
1076 struct device_attribute *attr, char *buf) \
1077{ \
1078 struct fc_vport *vport = transport_class_to_vport(dev); \
1079 struct Scsi_Host *shost = vport_to_shost(vport); \
1080 struct fc_internal *i = to_fc_internal(shost->transportt); \
1081 if ((i->f->get_vport_##field) && \
1082 !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))) \
1083 i->f->get_vport_##field(vport); \
1084 return snprintf(buf, sz, format_string, cast vport->field); \
1085}
1086
1087#define fc_vport_store_function(field) \
1088static ssize_t \
1089store_fc_vport_##field(struct device *dev, \
1090 struct device_attribute *attr, \
1091 const char *buf, size_t count) \
1092{ \
1093 int val; \
1094 struct fc_vport *vport = transport_class_to_vport(dev); \
1095 struct Scsi_Host *shost = vport_to_shost(vport); \
1096 struct fc_internal *i = to_fc_internal(shost->transportt); \
1097 char *cp; \
1098 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
1099 return -EBUSY; \
1100 val = simple_strtoul(buf, &cp, 0); \
1101 if (*cp && (*cp != '\n')) \
1102 return -EINVAL; \
1103 i->f->set_vport_##field(vport, val); \
1104 return count; \
1105}
1106
1107#define fc_vport_store_str_function(field, slen) \
1108static ssize_t \
1109store_fc_vport_##field(struct device *dev, \
1110 struct device_attribute *attr, \
1111 const char *buf, size_t count) \
1112{ \
1113 struct fc_vport *vport = transport_class_to_vport(dev); \
1114 struct Scsi_Host *shost = vport_to_shost(vport); \
1115 struct fc_internal *i = to_fc_internal(shost->transportt); \
1116 unsigned int cnt=count; \
1117 \
1118 \
1119 if (buf[cnt-1] == '\n') \
1120 cnt--; \
1121 if (cnt > ((slen) - 1)) \
1122 return -EINVAL; \
1123 memcpy(vport->field, buf, cnt); \
1124 i->f->set_vport_##field(vport); \
1125 return count; \
1126}
1127
1128#define fc_vport_rd_attr(field, format_string, sz) \
1129 fc_vport_show_function(field, format_string, sz, ) \
1130static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
1131 show_fc_vport_##field, NULL)
1132
1133#define fc_vport_rd_attr_cast(field, format_string, sz, cast) \
1134 fc_vport_show_function(field, format_string, sz, (cast)) \
1135static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
1136 show_fc_vport_##field, NULL)
1137
1138#define fc_vport_rw_attr(field, format_string, sz) \
1139 fc_vport_show_function(field, format_string, sz, ) \
1140 fc_vport_store_function(field) \
1141static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
1142 show_fc_vport_##field, \
1143 store_fc_vport_##field)
1144
1145#define fc_private_vport_show_function(field, format_string, sz, cast) \
1146static ssize_t \
1147show_fc_vport_##field (struct device *dev, \
1148 struct device_attribute *attr, char *buf) \
1149{ \
1150 struct fc_vport *vport = transport_class_to_vport(dev); \
1151 return snprintf(buf, sz, format_string, cast vport->field); \
1152}
1153
1154#define fc_private_vport_store_u32_function(field) \
1155static ssize_t \
1156store_fc_vport_##field(struct device *dev, \
1157 struct device_attribute *attr, \
1158 const char *buf, size_t count) \
1159{ \
1160 u32 val; \
1161 struct fc_vport *vport = transport_class_to_vport(dev); \
1162 char *cp; \
1163 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
1164 return -EBUSY; \
1165 val = simple_strtoul(buf, &cp, 0); \
1166 if (*cp && (*cp != '\n')) \
1167 return -EINVAL; \
1168 vport->field = val; \
1169 return count; \
1170}
1171
1172
1173#define fc_private_vport_rd_attr(field, format_string, sz) \
1174 fc_private_vport_show_function(field, format_string, sz, ) \
1175static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
1176 show_fc_vport_##field, NULL)
1177
1178#define fc_private_vport_rd_attr_cast(field, format_string, sz, cast) \
1179 fc_private_vport_show_function(field, format_string, sz, (cast)) \
1180static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
1181 show_fc_vport_##field, NULL)
1182
1183#define fc_private_vport_rw_u32_attr(field, format_string, sz) \
1184 fc_private_vport_show_function(field, format_string, sz, ) \
1185 fc_private_vport_store_u32_function(field) \
1186static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
1187 show_fc_vport_##field, \
1188 store_fc_vport_##field)
1189
1190
1191#define fc_private_vport_rd_enum_attr(title, maxlen) \
1192static ssize_t \
1193show_fc_vport_##title (struct device *dev, \
1194 struct device_attribute *attr, \
1195 char *buf) \
1196{ \
1197 struct fc_vport *vport = transport_class_to_vport(dev); \
1198 const char *name; \
1199 name = get_fc_##title##_name(vport->title); \
1200 if (!name) \
1201 return -EINVAL; \
1202 return snprintf(buf, maxlen, "%s\n", name); \
1203} \
1204static FC_DEVICE_ATTR(vport, title, S_IRUGO, \
1205 show_fc_vport_##title, NULL)
1206
1207
1208#define SETUP_VPORT_ATTRIBUTE_RD(field) \
1209 i->private_vport_attrs[count] = device_attr_vport_##field; \
1210 i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1211 i->private_vport_attrs[count].store = NULL; \
1212 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1213 if (i->f->get_##field) \
1214 count++
1215
1216
1217#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field) \
1218 i->private_vport_attrs[count] = device_attr_vport_##field; \
1219 i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1220 i->private_vport_attrs[count].store = NULL; \
1221 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1222 count++
1223
1224#define SETUP_VPORT_ATTRIBUTE_WR(field) \
1225 i->private_vport_attrs[count] = device_attr_vport_##field; \
1226 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1227 if (i->f->field) \
1228 count++
1229
1230
1231#define SETUP_VPORT_ATTRIBUTE_RW(field) \
1232 i->private_vport_attrs[count] = device_attr_vport_##field; \
1233 if (!i->f->set_vport_##field) { \
1234 i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1235 i->private_vport_attrs[count].store = NULL; \
1236 } \
1237 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1238 count++
1239
1240
1241#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field) \
1242{ \
1243 i->private_vport_attrs[count] = device_attr_vport_##field; \
1244 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1245 count++; \
1246}
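/* The FC Transport Virtual Port Attributes: */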
1257fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
1258fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
1259fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1260fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1261
1262static ssize_t
1263show_fc_vport_roles (struct device *dev, struct device_attribute *attr,
1264 char *buf)
1265{
1266 struct fc_vport *vport = transport_class_to_vport(dev);
1267
1268 if (vport->roles == FC_PORT_ROLE_UNKNOWN)
1269 return snprintf(buf, 20, "unknown\n");
1270 return get_fc_port_roles_names(vport->roles, buf);
1271}
1272static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
1273
1274fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
1275
1276fc_private_vport_show_function(symbolic_name, "%s\n",
1277 FC_VPORT_SYMBOLIC_NAMELEN + 1, )
1278fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
1279static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
1280 show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
1281
1282static ssize_t
1283store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
1284 const char *buf, size_t count)
1285{
1286 struct fc_vport *vport = transport_class_to_vport(dev);
1287 struct Scsi_Host *shost = vport_to_shost(vport);
1288 unsigned long flags;
1289
1290 spin_lock_irqsave(shost->host_lock, flags);
1291 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
1292 spin_unlock_irqrestore(shost->host_lock, flags);
1293 return -EBUSY;
1294 }
1295 vport->flags |= FC_VPORT_DELETING;
1296 spin_unlock_irqrestore(shost->host_lock, flags);
1297
1298 fc_queue_work(shost, &vport->vport_delete_work);
1299 return count;
1300}
1301static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
1302 NULL, store_fc_vport_delete);
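/*
 * vport_disable attribute: writing '1' disables the vport,
 * writing '0' re-enables a disabled vport.
 */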
1309static ssize_t
1310store_fc_vport_disable(struct device *dev, struct device_attribute *attr,
1311 const char *buf,
1312 size_t count)
1313{
1314 struct fc_vport *vport = transport_class_to_vport(dev);
1315 struct Scsi_Host *shost = vport_to_shost(vport);
1316 struct fc_internal *i = to_fc_internal(shost->transportt);
1317 int stat;
1318
1319 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1320 return -EBUSY;
1321
1322 if (*buf == '0') {
1323 if (vport->vport_state != FC_VPORT_DISABLED)
1324 return -EALREADY;
1325 } else if (*buf == '1') {
1326 if (vport->vport_state == FC_VPORT_DISABLED)
1327 return -EALREADY;
1328 } else
1329 return -EINVAL;
1330
1331 stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
1332 return stat ? stat : count;
1333}
1334static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
1335 NULL, store_fc_vport_disable);
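/*
 * FC Host Attribute Management
 */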
1342#define fc_host_show_function(field, format_string, sz, cast) \
1343static ssize_t \
1344show_fc_host_##field (struct device *dev, \
1345 struct device_attribute *attr, char *buf) \
1346{ \
1347 struct Scsi_Host *shost = transport_class_to_shost(dev); \
1348 struct fc_internal *i = to_fc_internal(shost->transportt); \
1349 if (i->f->get_host_##field) \
1350 i->f->get_host_##field(shost); \
1351 return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1352}
1353
1354#define fc_host_store_function(field) \
1355static ssize_t \
1356store_fc_host_##field(struct device *dev, \
1357 struct device_attribute *attr, \
1358 const char *buf, size_t count) \
1359{ \
1360 int val; \
1361 struct Scsi_Host *shost = transport_class_to_shost(dev); \
1362 struct fc_internal *i = to_fc_internal(shost->transportt); \
1363 char *cp; \
1364 \
1365 val = simple_strtoul(buf, &cp, 0); \
1366 if (*cp && (*cp != '\n')) \
1367 return -EINVAL; \
1368 i->f->set_host_##field(shost, val); \
1369 return count; \
1370}
1371
1372#define fc_host_store_str_function(field, slen) \
1373static ssize_t \
1374store_fc_host_##field(struct device *dev, \
1375 struct device_attribute *attr, \
1376 const char *buf, size_t count) \
1377{ \
1378 struct Scsi_Host *shost = transport_class_to_shost(dev); \
1379 struct fc_internal *i = to_fc_internal(shost->transportt); \
1380 unsigned int cnt=count; \
1381 \
1382 \
1383 if (buf[cnt-1] == '\n') \
1384 cnt--; \
1385 if (cnt > ((slen) - 1)) \
1386 return -EINVAL; \
1387 memcpy(fc_host_##field(shost), buf, cnt); \
1388 i->f->set_host_##field(shost); \
1389 return count; \
1390}
1391
1392#define fc_host_rd_attr(field, format_string, sz) \
1393 fc_host_show_function(field, format_string, sz, ) \
1394static FC_DEVICE_ATTR(host, field, S_IRUGO, \
1395 show_fc_host_##field, NULL)
1396
1397#define fc_host_rd_attr_cast(field, format_string, sz, cast) \
1398 fc_host_show_function(field, format_string, sz, (cast)) \
1399static FC_DEVICE_ATTR(host, field, S_IRUGO, \
1400 show_fc_host_##field, NULL)
1401
1402#define fc_host_rw_attr(field, format_string, sz) \
1403 fc_host_show_function(field, format_string, sz, ) \
1404 fc_host_store_function(field) \
1405static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR, \
1406 show_fc_host_##field, \
1407 store_fc_host_##field)
1408
1409#define fc_host_rd_enum_attr(title, maxlen) \
1410static ssize_t \
1411show_fc_host_##title (struct device *dev, \
1412 struct device_attribute *attr, char *buf) \
1413{ \
1414 struct Scsi_Host *shost = transport_class_to_shost(dev); \
1415 struct fc_internal *i = to_fc_internal(shost->transportt); \
1416 const char *name; \
1417 if (i->f->get_host_##title) \
1418 i->f->get_host_##title(shost); \
1419 name = get_fc_##title##_name(fc_host_##title(shost)); \
1420 if (!name) \
1421 return -EINVAL; \
1422 return snprintf(buf, maxlen, "%s\n", name); \
1423} \
1424static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
1425
1426#define SETUP_HOST_ATTRIBUTE_RD(field) \
1427 i->private_host_attrs[count] = device_attr_host_##field; \
1428 i->private_host_attrs[count].attr.mode = S_IRUGO; \
1429 i->private_host_attrs[count].store = NULL; \
1430 i->host_attrs[count] = &i->private_host_attrs[count]; \
1431 if (i->f->show_host_##field) \
1432 count++
1433
1434#define SETUP_HOST_ATTRIBUTE_RD_NS(field) \
1435 i->private_host_attrs[count] = device_attr_host_##field; \
1436 i->private_host_attrs[count].attr.mode = S_IRUGO; \
1437 i->private_host_attrs[count].store = NULL; \
1438 i->host_attrs[count] = &i->private_host_attrs[count]; \
1439 count++
1440
1441#define SETUP_HOST_ATTRIBUTE_RW(field) \
1442 i->private_host_attrs[count] = device_attr_host_##field; \
1443 if (!i->f->set_host_##field) { \
1444 i->private_host_attrs[count].attr.mode = S_IRUGO; \
1445 i->private_host_attrs[count].store = NULL; \
1446 } \
1447 i->host_attrs[count] = &i->private_host_attrs[count]; \
1448 if (i->f->show_host_##field) \
1449 count++
1450
1451
1452#define fc_private_host_show_function(field, format_string, sz, cast) \
1453static ssize_t \
1454show_fc_host_##field (struct device *dev, \
1455 struct device_attribute *attr, char *buf) \
1456{ \
1457 struct Scsi_Host *shost = transport_class_to_shost(dev); \
1458 return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
1459}
1460
1461#define fc_private_host_rd_attr(field, format_string, sz) \
1462 fc_private_host_show_function(field, format_string, sz, ) \
1463static FC_DEVICE_ATTR(host, field, S_IRUGO, \
1464 show_fc_host_##field, NULL)
1465
1466#define fc_private_host_rd_attr_cast(field, format_string, sz, cast) \
1467 fc_private_host_show_function(field, format_string, sz, (cast)) \
1468static FC_DEVICE_ATTR(host, field, S_IRUGO, \
1469 show_fc_host_##field, NULL)
1470
1471#define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field) \
1472 i->private_host_attrs[count] = device_attr_host_##field; \
1473 i->private_host_attrs[count].attr.mode = S_IRUGO; \
1474 i->private_host_attrs[count].store = NULL; \
1475 i->host_attrs[count] = &i->private_host_attrs[count]; \
1476 count++
1477
1478#define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field) \
1479{ \
1480 i->private_host_attrs[count] = device_attr_host_##field; \
1481 i->host_attrs[count] = &i->private_host_attrs[count]; \
1482 count++; \
1483}
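/* Fixed Host Attributes */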
1488static ssize_t
1489show_fc_host_supported_classes (struct device *dev,
1490 struct device_attribute *attr, char *buf)
1491{
1492 struct Scsi_Host *shost = transport_class_to_shost(dev);
1493
1494 if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED)
1495 return snprintf(buf, 20, "unspecified\n");
1496
1497 return get_fc_cos_names(fc_host_supported_classes(shost), buf);
1498}
1499static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO,
1500 show_fc_host_supported_classes, NULL);
1501
1502static ssize_t
1503show_fc_host_supported_fc4s (struct device *dev,
1504 struct device_attribute *attr, char *buf)
1505{
1506 struct Scsi_Host *shost = transport_class_to_shost(dev);
1507 return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost));
1508}
1509static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO,
1510 show_fc_host_supported_fc4s, NULL);
1511
1512static ssize_t
1513show_fc_host_supported_speeds (struct device *dev,
1514 struct device_attribute *attr, char *buf)
1515{
1516 struct Scsi_Host *shost = transport_class_to_shost(dev);
1517
1518 if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN)
1519 return snprintf(buf, 20, "unknown\n");
1520
1521 return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf);
1522}
1523static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO,
1524 show_fc_host_supported_speeds, NULL);
1525
1526
1527fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1528fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1529fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
1530 unsigned long long);
1531fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
1532fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
1533fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
1534fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1);
1535fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1536fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1537fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1538fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1539fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
1540fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
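/* Dynamic Host Attributes */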
1545static ssize_t
1546show_fc_host_active_fc4s (struct device *dev,
1547 struct device_attribute *attr, char *buf)
1548{
1549 struct Scsi_Host *shost = transport_class_to_shost(dev);
1550 struct fc_internal *i = to_fc_internal(shost->transportt);
1551
1552 if (i->f->get_host_active_fc4s)
1553 i->f->get_host_active_fc4s(shost);
1554
1555 return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost));
1556}
1557static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO,
1558 show_fc_host_active_fc4s, NULL);
1559
1560static ssize_t
1561show_fc_host_speed (struct device *dev,
1562 struct device_attribute *attr, char *buf)
1563{
1564 struct Scsi_Host *shost = transport_class_to_shost(dev);
1565 struct fc_internal *i = to_fc_internal(shost->transportt);
1566
1567 if (i->f->get_host_speed)
1568 i->f->get_host_speed(shost);
1569
1570 if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN)
1571 return snprintf(buf, 20, "unknown\n");
1572
1573 return get_fc_port_speed_names(fc_host_speed(shost), buf);
1574}
1575static FC_DEVICE_ATTR(host, speed, S_IRUGO,
1576 show_fc_host_speed, NULL);
1577
1578
1579fc_host_rd_attr(port_id, "0x%06x\n", 20);
1580fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
1581fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
1582fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
1583fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
1584
1585fc_private_host_show_function(system_hostname, "%s\n",
1586 FC_SYMBOLIC_NAME_SIZE + 1, )
1587fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
1588static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
1589 show_fc_host_system_hostname, store_fc_host_system_hostname);
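/* Private Host Attributes */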
1594static ssize_t
1595show_fc_private_host_tgtid_bind_type(struct device *dev,
1596 struct device_attribute *attr, char *buf)
1597{
1598 struct Scsi_Host *shost = transport_class_to_shost(dev);
1599 const char *name;
1600
1601 name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost));
1602 if (!name)
1603 return -EINVAL;
1604 return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name);
1605}
1606
1607#define get_list_head_entry(pos, head, member) \
1608 pos = list_entry((head)->next, typeof(*pos), member)
1609
1610static ssize_t
1611store_fc_private_host_tgtid_bind_type(struct device *dev,
1612 struct device_attribute *attr, const char *buf, size_t count)
1613{
1614 struct Scsi_Host *shost = transport_class_to_shost(dev);
1615 struct fc_rport *rport;
1616 enum fc_tgtid_binding_type val;
1617 unsigned long flags;
1618
1619 if (get_fc_tgtid_bind_type_match(buf, &val))
1620 return -EINVAL;
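 /* if changing bind type, purge all unused consistent bindings */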
1623 if (val != fc_host_tgtid_bind_type(shost)) {
1624 spin_lock_irqsave(shost->host_lock, flags);
1625 while (!list_empty(&fc_host_rport_bindings(shost))) {
1626 get_list_head_entry(rport,
1627 &fc_host_rport_bindings(shost), peers);
1628 list_del(&rport->peers);
1629 rport->port_state = FC_PORTSTATE_DELETED;
1630 fc_queue_work(shost, &rport->rport_delete_work);
1631 }
1632 spin_unlock_irqrestore(shost->host_lock, flags);
1633 }
1634
1635 fc_host_tgtid_bind_type(shost) = val;
1636 return count;
1637}
1638
1639static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR,
1640 show_fc_private_host_tgtid_bind_type,
1641 store_fc_private_host_tgtid_bind_type);
1642
1643static ssize_t
1644store_fc_private_host_issue_lip(struct device *dev,
1645 struct device_attribute *attr, const char *buf, size_t count)
1646{
1647 struct Scsi_Host *shost = transport_class_to_shost(dev);
1648 struct fc_internal *i = to_fc_internal(shost->transportt);
1649 int ret;
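 /* the value written is ignored; any write issues a LIP if supported */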
1652 if (i->f->issue_fc_host_lip) {
1653 ret = i->f->issue_fc_host_lip(shost);
1654 return ret ? ret: count;
1655 }
1656
1657 return -ENOENT;
1658}
1659
1660static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
1661 store_fc_private_host_issue_lip);
1662
1663static ssize_t
1664store_fc_private_host_dev_loss_tmo(struct device *dev,
1665 struct device_attribute *attr,
1666 const char *buf, size_t count)
1667{
1668 struct Scsi_Host *shost = transport_class_to_shost(dev);
1669 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
1670 struct fc_rport *rport;
1671 unsigned long val, flags;
1672 int rc;
1673
1674 rc = fc_str_to_dev_loss(buf, &val);
1675 if (rc)
1676 return rc;
1677
1678 fc_host_dev_loss_tmo(shost) = val;
1679 spin_lock_irqsave(shost->host_lock, flags);
1680 list_for_each_entry(rport, &fc_host->rports, peers)
1681 fc_rport_set_dev_loss_tmo(rport, val);
1682 spin_unlock_irqrestore(shost->host_lock, flags);
1683 return count;
1684}
1685
1686fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, );
1687static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR,
1688 show_fc_host_dev_loss_tmo,
1689 store_fc_private_host_dev_loss_tmo);
1690
1691fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
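/*
 * Host Statistics Management
 */

/* Show a given per-host statistic: a u64 at offset into fc_host_statistics */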
1698static ssize_t
1699fc_stat_show(const struct device *dev, char *buf, unsigned long offset)
1700{
1701 struct Scsi_Host *shost = transport_class_to_shost(dev);
1702 struct fc_internal *i = to_fc_internal(shost->transportt);
1703 struct fc_host_statistics *stats;
1704 ssize_t ret = -ENOENT;
1705
1706 if (offset > sizeof(struct fc_host_statistics) ||
1707 offset % sizeof(u64) != 0)
1708 WARN_ON(1);
1709
1710 if (i->f->get_fc_host_stats) {
1711 stats = (i->f->get_fc_host_stats)(shost);
1712 if (stats)
1713 ret = snprintf(buf, 20, "0x%llx\n",
1714 (unsigned long long)*(u64 *)(((u8 *) stats) + offset));
1715 }
1716 return ret;
1717}
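/* generate a read-only statistic attribute */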
1721#define fc_host_statistic(name) \
1722static ssize_t show_fcstat_##name(struct device *cd, \
1723 struct device_attribute *attr, \
1724 char *buf) \
1725{ \
1726 return fc_stat_show(cd, buf, \
1727 offsetof(struct fc_host_statistics, name)); \
1728} \
1729static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL)
1730
1731fc_host_statistic(seconds_since_last_reset);
1732fc_host_statistic(tx_frames);
1733fc_host_statistic(tx_words);
1734fc_host_statistic(rx_frames);
1735fc_host_statistic(rx_words);
1736fc_host_statistic(lip_count);
1737fc_host_statistic(nos_count);
1738fc_host_statistic(error_frames);
1739fc_host_statistic(dumped_frames);
1740fc_host_statistic(link_failure_count);
1741fc_host_statistic(loss_of_sync_count);
1742fc_host_statistic(loss_of_signal_count);
1743fc_host_statistic(prim_seq_protocol_err_count);
1744fc_host_statistic(invalid_tx_word_count);
1745fc_host_statistic(invalid_crc_count);
1746fc_host_statistic(fcp_input_requests);
1747fc_host_statistic(fcp_output_requests);
1748fc_host_statistic(fcp_control_requests);
1749fc_host_statistic(fcp_input_megabytes);
1750fc_host_statistic(fcp_output_megabytes);
1751fc_host_statistic(fcp_packet_alloc_failures);
1752fc_host_statistic(fcp_packet_aborts);
1753fc_host_statistic(fcp_frame_alloc_failures);
1754fc_host_statistic(fc_no_free_exch);
1755fc_host_statistic(fc_no_free_exch_xid);
1756fc_host_statistic(fc_xid_not_found);
1757fc_host_statistic(fc_xid_busy);
1758fc_host_statistic(fc_seq_not_found);
1759fc_host_statistic(fc_non_bls_resp);
1760
1761static ssize_t
1762fc_reset_statistics(struct device *dev, struct device_attribute *attr,
1763 const char *buf, size_t count)
1764{
1765 struct Scsi_Host *shost = transport_class_to_shost(dev);
1766 struct fc_internal *i = to_fc_internal(shost->transportt);
1767
1768
1769 if (i->f->reset_fc_host_stats) {
1770 i->f->reset_fc_host_stats(shost);
1771 return count;
1772 }
1773
1774 return -ENOENT;
1775}
1776static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
1777 fc_reset_statistics);
1778
1779static struct attribute *fc_statistics_attrs[] = {
1780 &device_attr_host_seconds_since_last_reset.attr,
1781 &device_attr_host_tx_frames.attr,
1782 &device_attr_host_tx_words.attr,
1783 &device_attr_host_rx_frames.attr,
1784 &device_attr_host_rx_words.attr,
1785 &device_attr_host_lip_count.attr,
1786 &device_attr_host_nos_count.attr,
1787 &device_attr_host_error_frames.attr,
1788 &device_attr_host_dumped_frames.attr,
1789 &device_attr_host_link_failure_count.attr,
1790 &device_attr_host_loss_of_sync_count.attr,
1791 &device_attr_host_loss_of_signal_count.attr,
1792 &device_attr_host_prim_seq_protocol_err_count.attr,
1793 &device_attr_host_invalid_tx_word_count.attr,
1794 &device_attr_host_invalid_crc_count.attr,
1795 &device_attr_host_fcp_input_requests.attr,
1796 &device_attr_host_fcp_output_requests.attr,
1797 &device_attr_host_fcp_control_requests.attr,
1798 &device_attr_host_fcp_input_megabytes.attr,
1799 &device_attr_host_fcp_output_megabytes.attr,
1800 &device_attr_host_fcp_packet_alloc_failures.attr,
1801 &device_attr_host_fcp_packet_aborts.attr,
1802 &device_attr_host_fcp_frame_alloc_failures.attr,
1803 &device_attr_host_fc_no_free_exch.attr,
1804 &device_attr_host_fc_no_free_exch_xid.attr,
1805 &device_attr_host_fc_xid_not_found.attr,
1806 &device_attr_host_fc_xid_busy.attr,
1807 &device_attr_host_fc_seq_not_found.attr,
1808 &device_attr_host_fc_non_bls_resp.attr,
1809 &device_attr_host_reset_statistics.attr,
1810 NULL
1811};
1812
1813static struct attribute_group fc_statistics_group = {
1814 .name = "statistics",
1815 .attrs = fc_statistics_attrs,
1816};
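/* Host Vport Attributes */

/* fc_parse_wwn - convert a 16 hex-digit string into a u64 WWN */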
1821static int
1822fc_parse_wwn(const char *ns, u64 *nm)
1823{
1824 unsigned int i, j;
1825 u8 wwn[8];
1826
1827 memset(wwn, 0, sizeof(wwn));
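 /* Validate and store the new name */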
1830 for (i=0, j=0; i < 16; i++) {
1831 int value;
1832
1833 value = hex_to_bin(*ns++);
1834 if (value >= 0)
1835 j = (j << 4) | value;
1836 else
1837 return -EINVAL;
1838 if (i % 2) {
1839 wwn[i/2] = j & 0xff;
1840 j = 0;
1841 }
1842 }
1843
1844 *nm = wwn_to_u64(wwn);
1845
1846 return 0;
1847}
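/*
 * store_fc_host_vport_create() - create a new NPIV vport
 * @dev:	device associated with the scsi host
 * @attr:	unused
 * @buf:	"<WWPN>:<WWNN>", each name as 16 hex characters
 * @count:	number of characters in buf
 *
 * e.g.  echo '2000001122334455:2100001122334455' > vport_create
 */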
1856static ssize_t
1857store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
1858 const char *buf, size_t count)
1859{
1860 struct Scsi_Host *shost = transport_class_to_shost(dev);
1861 struct fc_vport_identifiers vid;
1862 struct fc_vport *vport;
1863 unsigned int cnt=count;
1864 int stat;
1865
1866 memset(&vid, 0, sizeof(vid));
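 /* count may include a LF at end of string */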
1869 if (buf[cnt-1] == '\n')
1870 cnt--;
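 /* validate we have enough characters for WWPN and WWNN */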
1873 if ((cnt != (16+1+16)) || (buf[16] != ':'))
1874 return -EINVAL;
1875
1876 stat = fc_parse_wwn(&buf[0], &vid.port_name);
1877 if (stat)
1878 return stat;
1879
1880 stat = fc_parse_wwn(&buf[17], &vid.node_name);
1881 if (stat)
1882 return stat;
1883
1884 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1885 vid.vport_type = FC_PORTTYPE_NPIV;
1886
1887 vid.disable = false;
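 /* vports are only created on channel 0 */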
1890 stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
1891 return stat ? stat : count;
1892}
1893static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
1894 store_fc_host_vport_create);
1895
1896
1897
1898
1899
1900
1901
1902
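/*
 * store_fc_host_vport_delete - sysfs store handler for the host
 * "vport_delete" attribute.
 *
 * Takes the same "<wwpn>:<wwnn>" format as vport_create, looks up the
 * matching virtual port on channel 0, marks it FC_VPORT_DELETING and
 * terminates it.  Returns -ENODEV if no match is found or the vport is
 * already being created or deleted.
 */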
1903static ssize_t
1904store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
1905 const char *buf, size_t count)
1906{
1907 struct Scsi_Host *shost = transport_class_to_shost(dev);
1908 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
1909 struct fc_vport *vport;
1910 u64 wwpn, wwnn;
1911 unsigned long flags;
1912	unsigned int cnt = count;
1913 int stat, match;
1914
1915
1916 if (buf[cnt-1] == '\n')
1917 cnt--;
1918
1919
1920 if ((cnt != (16+1+16)) || (buf[16] != ':'))
1921 return -EINVAL;
1922
1923 stat = fc_parse_wwn(&buf[0], &wwpn);
1924 if (stat)
1925 return stat;
1926
1927 stat = fc_parse_wwn(&buf[17], &wwnn);
1928 if (stat)
1929 return stat;
1930
1931 spin_lock_irqsave(shost->host_lock, flags);
1932 match = 0;
1933
1934 list_for_each_entry(vport, &fc_host->vports, peers) {
1935 if ((vport->channel == 0) &&
1936 (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
1937 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1938 break;
1939 vport->flags |= FC_VPORT_DELETING;
1940 match = 1;
1941 break;
1942 }
1943 }
1944 spin_unlock_irqrestore(shost->host_lock, flags);
1945
1946 if (!match)
1947 return -ENODEV;
1948
1949 stat = fc_vport_terminate(vport);
1950 return stat ? stat : count;
1951}
1952static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
1953 store_fc_host_vport_delete);
1954
1955
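/*
 * Attribute-container match routines.  Each verifies that the device in
 * question belongs to this FC transport template before the corresponding
 * host, target, rport or vport attribute group is attached to it.
 */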
1956static int fc_host_match(struct attribute_container *cont,
1957 struct device *dev)
1958{
1959 struct Scsi_Host *shost;
1960 struct fc_internal *i;
1961
1962 if (!scsi_is_host_device(dev))
1963 return 0;
1964
1965 shost = dev_to_shost(dev);
1966 if (!shost->transportt || shost->transportt->host_attrs.ac.class
1967 != &fc_host_class.class)
1968 return 0;
1969
1970 i = to_fc_internal(shost->transportt);
1971
1972 return &i->t.host_attrs.ac == cont;
1973}
1974
1975static int fc_target_match(struct attribute_container *cont,
1976 struct device *dev)
1977{
1978 struct Scsi_Host *shost;
1979 struct fc_internal *i;
1980
1981 if (!scsi_is_target_device(dev))
1982 return 0;
1983
1984 shost = dev_to_shost(dev->parent);
1985 if (!shost->transportt || shost->transportt->host_attrs.ac.class
1986 != &fc_host_class.class)
1987 return 0;
1988
1989 i = to_fc_internal(shost->transportt);
1990
1991 return &i->t.target_attrs.ac == cont;
1992}
1993
1994static void fc_rport_dev_release(struct device *dev)
1995{
1996 struct fc_rport *rport = dev_to_rport(dev);
1997 put_device(dev->parent);
1998 kfree(rport);
1999}
2000
2001int scsi_is_fc_rport(const struct device *dev)
2002{
2003 return dev->release == fc_rport_dev_release;
2004}
2005EXPORT_SYMBOL(scsi_is_fc_rport);
2006
2007static int fc_rport_match(struct attribute_container *cont,
2008 struct device *dev)
2009{
2010 struct Scsi_Host *shost;
2011 struct fc_internal *i;
2012
2013 if (!scsi_is_fc_rport(dev))
2014 return 0;
2015
2016 shost = dev_to_shost(dev->parent);
2017 if (!shost->transportt || shost->transportt->host_attrs.ac.class
2018 != &fc_host_class.class)
2019 return 0;
2020
2021 i = to_fc_internal(shost->transportt);
2022
2023 return &i->rport_attr_cont.ac == cont;
2024}
2025
2026
2027static void fc_vport_dev_release(struct device *dev)
2028{
2029 struct fc_vport *vport = dev_to_vport(dev);
2030 put_device(dev->parent);
2031 kfree(vport);
2032}
2033
2034int scsi_is_fc_vport(const struct device *dev)
2035{
2036 return dev->release == fc_vport_dev_release;
2037}
2038EXPORT_SYMBOL(scsi_is_fc_vport);
2039
2040static int fc_vport_match(struct attribute_container *cont,
2041 struct device *dev)
2042{
2043 struct fc_vport *vport;
2044 struct Scsi_Host *shost;
2045 struct fc_internal *i;
2046
2047 if (!scsi_is_fc_vport(dev))
2048 return 0;
2049 vport = dev_to_vport(dev);
2050
2051 shost = vport_to_shost(vport);
2052 if (!shost->transportt || shost->transportt->host_attrs.ac.class
2053 != &fc_host_class.class)
2054 return 0;
2055
2056 i = to_fc_internal(shost->transportt);
2057 return &i->vport_attr_cont.ac == cont;
2058}
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
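/**
 * fc_timed_out - FC transport I/O timeout intercept (eh_timed_out)
 * @scmd:	The SCSI command which timed out
 *
 * If the command's remote port is blocked (temporarily unreachable),
 * restart the block layer timer rather than escalating to SCSI error
 * handling; otherwise let normal timeout handling proceed.
 */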
2083static enum blk_eh_timer_return
2084fc_timed_out(struct scsi_cmnd *scmd)
2085{
2086 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
2087
2088 if (rport->port_state == FC_PORTSTATE_BLOCKED)
2089 return BLK_EH_RESET_TIMER;
2090
2091 return BLK_EH_NOT_HANDLED;
2092}
2093
2094
2095
2096
2097
2098
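/*
 * Scan a single (channel, id, lun) nexus, but only when an online remote
 * port with an assigned scsi_target_id matches that channel and id.
 */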
2099static void
2100fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
2101{
2102 struct fc_rport *rport;
2103 unsigned long flags;
2104
2105 spin_lock_irqsave(shost->host_lock, flags);
2106
2107 list_for_each_entry(rport, &fc_host_rports(shost), peers) {
2108 if (rport->scsi_target_id == -1)
2109 continue;
2110
2111 if (rport->port_state != FC_PORTSTATE_ONLINE)
2112 continue;
2113
2114 if ((channel == rport->channel) &&
2115 (id == rport->scsi_target_id)) {
2116 spin_unlock_irqrestore(shost->host_lock, flags);
2117 scsi_scan_target(&rport->dev, channel, id, lun,
2118 SCSI_SCAN_MANUAL);
2119 return;
2120 }
2121 }
2122
2123 spin_unlock_irqrestore(shost->host_lock, flags);
2124}
2125
2126
2127
2128
2129
2130
2131
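/*
 * fc_user_scan - transport user_scan callback.
 *
 * Validates the requested channel/id/lun against the host limits, expands
 * SCAN_WILD_CARD channel and id values into explicit ranges, and scans
 * each (channel, id) pair through fc_user_scan_tgt().
 */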
2132static int
2133fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun)
2134{
2135 uint chlo, chhi;
2136 uint tgtlo, tgthi;
2137
2138 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
2139 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
2140 ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
2141 return -EINVAL;
2142
2143 if (channel == SCAN_WILD_CARD) {
2144 chlo = 0;
2145 chhi = shost->max_channel + 1;
2146 } else {
2147 chlo = channel;
2148 chhi = channel + 1;
2149 }
2150
2151 if (id == SCAN_WILD_CARD) {
2152 tgtlo = 0;
2153 tgthi = shost->max_id;
2154 } else {
2155 tgtlo = id;
2156 tgthi = id + 1;
2157 }
2158
2159 for ( ; chlo < chhi; chlo++)
2160 for ( ; tgtlo < tgthi; tgtlo++)
2161 fc_user_scan_tgt(shost, chlo, tgtlo, lun);
2162
2163 return 0;
2164}
2165
2166static int fc_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,
2167 int result)
2168{
2169 struct fc_internal *i = to_fc_internal(shost->transportt);
2170 return i->f->tsk_mgmt_response(shost, nexus, tm_id, result);
2171}
2172
2173static int fc_it_nexus_response(struct Scsi_Host *shost, u64 nexus, int result)
2174{
2175 struct fc_internal *i = to_fc_internal(shost->transportt);
2176 return i->f->it_nexus_response(shost, nexus, result);
2177}
2178
2179struct scsi_transport_template *
2180fc_attach_transport(struct fc_function_template *ft)
2181{
2182 int count;
2183 struct fc_internal *i = kzalloc(sizeof(struct fc_internal),
2184 GFP_KERNEL);
2185
2186 if (unlikely(!i))
2187 return NULL;
2188
2189 i->t.target_attrs.ac.attrs = &i->starget_attrs[0];
2190 i->t.target_attrs.ac.class = &fc_transport_class.class;
2191 i->t.target_attrs.ac.match = fc_target_match;
2192 i->t.target_size = sizeof(struct fc_starget_attrs);
2193 transport_container_register(&i->t.target_attrs);
2194
2195 i->t.host_attrs.ac.attrs = &i->host_attrs[0];
2196 i->t.host_attrs.ac.class = &fc_host_class.class;
2197 i->t.host_attrs.ac.match = fc_host_match;
2198 i->t.host_size = sizeof(struct fc_host_attrs);
2199 if (ft->get_fc_host_stats)
2200 i->t.host_attrs.statistics = &fc_statistics_group;
2201 transport_container_register(&i->t.host_attrs);
2202
2203 i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
2204 i->rport_attr_cont.ac.class = &fc_rport_class.class;
2205 i->rport_attr_cont.ac.match = fc_rport_match;
2206 transport_container_register(&i->rport_attr_cont);
2207
2208 i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
2209 i->vport_attr_cont.ac.class = &fc_vport_class.class;
2210 i->vport_attr_cont.ac.match = fc_vport_match;
2211 transport_container_register(&i->vport_attr_cont);
2212
2213 i->f = ft;
2214
2215
2216 i->t.create_work_queue = 1;
2217
2218 i->t.eh_timed_out = fc_timed_out;
2219
2220 i->t.user_scan = fc_user_scan;
2221
2222
2223 i->t.tsk_mgmt_response = fc_tsk_mgmt_response;
2224 i->t.it_nexus_response = fc_it_nexus_response;
2225
2226
2227
2228
2229 count = 0;
2230 SETUP_STARGET_ATTRIBUTE_RD(node_name);
2231 SETUP_STARGET_ATTRIBUTE_RD(port_name);
2232 SETUP_STARGET_ATTRIBUTE_RD(port_id);
2233
2234 BUG_ON(count > FC_STARGET_NUM_ATTRS);
2235
2236 i->starget_attrs[count] = NULL;
2237
2238
2239
2240
2241
2242	count = 0;
2243 SETUP_HOST_ATTRIBUTE_RD(node_name);
2244 SETUP_HOST_ATTRIBUTE_RD(port_name);
2245 SETUP_HOST_ATTRIBUTE_RD(permanent_port_name);
2246 SETUP_HOST_ATTRIBUTE_RD(supported_classes);
2247 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
2248 SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
2249 SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
2250 if (ft->vport_create) {
2251 SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
2252 SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
2253 }
2254 SETUP_HOST_ATTRIBUTE_RD(serial_number);
2255 SETUP_HOST_ATTRIBUTE_RD(manufacturer);
2256 SETUP_HOST_ATTRIBUTE_RD(model);
2257 SETUP_HOST_ATTRIBUTE_RD(model_description);
2258 SETUP_HOST_ATTRIBUTE_RD(hardware_version);
2259 SETUP_HOST_ATTRIBUTE_RD(driver_version);
2260 SETUP_HOST_ATTRIBUTE_RD(firmware_version);
2261 SETUP_HOST_ATTRIBUTE_RD(optionrom_version);
2262
2263 SETUP_HOST_ATTRIBUTE_RD(port_id);
2264 SETUP_HOST_ATTRIBUTE_RD(port_type);
2265 SETUP_HOST_ATTRIBUTE_RD(port_state);
2266 SETUP_HOST_ATTRIBUTE_RD(active_fc4s);
2267 SETUP_HOST_ATTRIBUTE_RD(speed);
2268 SETUP_HOST_ATTRIBUTE_RD(fabric_name);
2269 SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
2270 SETUP_HOST_ATTRIBUTE_RW(system_hostname);
2271
2272
2273 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo);
2274 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
2275 if (ft->issue_fc_host_lip)
2276 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
2277 if (ft->vport_create)
2278 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
2279 if (ft->vport_delete)
2280 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
2281
2282 BUG_ON(count > FC_HOST_NUM_ATTRS);
2283
2284 i->host_attrs[count] = NULL;
2285
2286
2287
2288
2289	count = 0;
2290 SETUP_RPORT_ATTRIBUTE_RD(maxframe_size);
2291 SETUP_RPORT_ATTRIBUTE_RD(supported_classes);
2292 SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
2293 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name);
2294 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name);
2295 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id);
2296 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
2297 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
2298 SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
2299 SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
2300
2301 BUG_ON(count > FC_RPORT_NUM_ATTRS);
2302
2303 i->rport_attrs[count] = NULL;
2304
2305
2306
2307
2308	count = 0;
2309 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
2310 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
2311 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
2312 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
2313 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
2314 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
2315 SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
2316 SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
2317 SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
2318
2319 BUG_ON(count > FC_VPORT_NUM_ATTRS);
2320
2321 i->vport_attrs[count] = NULL;
2322
2323 return &i->t;
2324}
2325EXPORT_SYMBOL(fc_attach_transport);
2326
2327void fc_release_transport(struct scsi_transport_template *t)
2328{
2329 struct fc_internal *i = to_fc_internal(t);
2330
2331 transport_container_unregister(&i->t.target_attrs);
2332 transport_container_unregister(&i->t.host_attrs);
2333 transport_container_unregister(&i->rport_attr_cont);
2334 transport_container_unregister(&i->vport_attr_cont);
2335
2336 kfree(i);
2337}
2338EXPORT_SYMBOL(fc_release_transport);
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
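/**
 * fc_queue_work - queue work on the fc_host's general work queue
 * @shost:	Pointer to Scsi_Host bound to fc_host
 * @work:	Work to queue
 *
 * Returns the queue_work() result, or -EINVAL (after dumping a stack
 * trace) if the host's work queue was never created.
 */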
2350static int
2351fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
2352{
2353 if (unlikely(!fc_host_work_q(shost))) {
2354 printk(KERN_ERR
2355 "ERROR: FC host '%s' attempted to queue work, "
2356		"when no workqueue was created.\n", shost->hostt->name);
2357 dump_stack();
2358
2359 return -EINVAL;
2360 }
2361
2362 return queue_work(fc_host_work_q(shost), work);
2363}
2364
2365
2366
2367
2368
2369static void
2370fc_flush_work(struct Scsi_Host *shost)
2371{
2372 if (!fc_host_work_q(shost)) {
2373 printk(KERN_ERR
2374 "ERROR: FC host '%s' attempted to flush work, "
2375		"when no workqueue was created.\n", shost->hostt->name);
2376 dump_stack();
2377 return;
2378 }
2379
2380 flush_workqueue(fc_host_work_q(shost));
2381}
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
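/**
 * fc_queue_devloss_work - queue delayed work on the devloss work queue
 * @shost:	Pointer to Scsi_Host bound to fc_host
 * @work:	Delayed work to queue
 * @delay:	Delay, in jiffies
 *
 * Used for the fast_io_fail and dev_loss timers.  Returns the
 * queue_delayed_work() result, or -EINVAL if the devloss work queue was
 * never created.
 */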
2392static int
2393fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
2394 unsigned long delay)
2395{
2396 if (unlikely(!fc_host_devloss_work_q(shost))) {
2397 printk(KERN_ERR
2398 "ERROR: FC host '%s' attempted to queue work, "
2399		"when no workqueue was created.\n", shost->hostt->name);
2400 dump_stack();
2401
2402 return -EINVAL;
2403 }
2404
2405 return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
2406}
2407
2408
2409
2410
2411
2412static void
2413fc_flush_devloss(struct Scsi_Host *shost)
2414{
2415 if (!fc_host_devloss_work_q(shost)) {
2416 printk(KERN_ERR
2417 "ERROR: FC host '%s' attempted to flush work, "
2418		"when no workqueue was created.\n", shost->hostt->name);
2419 dump_stack();
2420 return;
2421 }
2422
2423 flush_workqueue(fc_host_devloss_work_q(shost));
2424}
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
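/**
 * fc_remove_host - tear down the FC transport objects attached to a host
 * @shost:	Scsi_Host whose FC transport state is being removed
 *
 * Schedules deletion of every vport and rport (including saved bindings on
 * the rport_bindings list) still attached to the host, flushes any pending
 * scsi scan work, then destroys the fc_host's general and devloss work
 * queues.
 */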
2442void
2443fc_remove_host(struct Scsi_Host *shost)
2444{
2445 struct fc_vport *vport = NULL, *next_vport = NULL;
2446 struct fc_rport *rport = NULL, *next_rport = NULL;
2447 struct workqueue_struct *work_q;
2448 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2449 unsigned long flags;
2450
2451 spin_lock_irqsave(shost->host_lock, flags);
2452
2453
2454 list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
2455 fc_queue_work(shost, &vport->vport_delete_work);
2456
2457
2458 list_for_each_entry_safe(rport, next_rport,
2459 &fc_host->rports, peers) {
2460 list_del(&rport->peers);
2461 rport->port_state = FC_PORTSTATE_DELETED;
2462 fc_queue_work(shost, &rport->rport_delete_work);
2463 }
2464
2465 list_for_each_entry_safe(rport, next_rport,
2466 &fc_host->rport_bindings, peers) {
2467 list_del(&rport->peers);
2468 rport->port_state = FC_PORTSTATE_DELETED;
2469 fc_queue_work(shost, &rport->rport_delete_work);
2470 }
2471
2472 spin_unlock_irqrestore(shost->host_lock, flags);
2473
2474
2475 scsi_flush_work(shost);
2476
2477
2478 if (fc_host->work_q) {
2479 work_q = fc_host->work_q;
2480 fc_host->work_q = NULL;
2481 destroy_workqueue(work_q);
2482 }
2483
2484
2485 if (fc_host->devloss_work_q) {
2486 work_q = fc_host->devloss_work_q;
2487 fc_host->devloss_work_q = NULL;
2488 destroy_workqueue(work_q);
2489 }
2490}
2491EXPORT_SYMBOL(fc_remove_host);
2492
2493static void fc_terminate_rport_io(struct fc_rport *rport)
2494{
2495 struct Scsi_Host *shost = rport_to_shost(rport);
2496 struct fc_internal *i = to_fc_internal(shost->transportt);
2497
2498
2499 if (i->f->terminate_rport_io)
2500 i->f->terminate_rport_io(rport);
2501
2502
2503
2504
2505 scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
2506}
2507
2508
2509
2510
2511
2512
2513
2514static void
2515fc_starget_delete(struct work_struct *work)
2516{
2517 struct fc_rport *rport =
2518 container_of(work, struct fc_rport, stgt_delete_work);
2519
2520 fc_terminate_rport_io(rport);
2521 scsi_remove_target(&rport->dev);
2522}
2523
2524
2525
2526
2527
2528
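/**
 * fc_rport_final_delete - last stage of remote port teardown
 * @work:	The rport's rport_delete_work
 *
 * Terminates outstanding I/O, waits out any pending scan and devloss
 * timers, removes the associated scsi target (if one was assigned),
 * invokes the LLDD dev_loss_tmo callback if it has not already run,
 * removes the bsg queue, and finally unregisters and frees the device.
 */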
2529static void
2530fc_rport_final_delete(struct work_struct *work)
2531{
2532 struct fc_rport *rport =
2533 container_of(work, struct fc_rport, rport_delete_work);
2534 struct device *dev = &rport->dev;
2535 struct Scsi_Host *shost = rport_to_shost(rport);
2536 struct fc_internal *i = to_fc_internal(shost->transportt);
2537 unsigned long flags;
2538 int do_callback = 0;
2539
2540 fc_terminate_rport_io(rport);
2541
2542
2543
2544
2545
2546 if (rport->flags & FC_RPORT_SCAN_PENDING)
2547 scsi_flush_work(shost);
2548
2549
2550
2551
2552
2553
2554 spin_lock_irqsave(shost->host_lock, flags);
2555 if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
2556 spin_unlock_irqrestore(shost->host_lock, flags);
2557 if (!cancel_delayed_work(&rport->fail_io_work))
2558 fc_flush_devloss(shost);
2559 if (!cancel_delayed_work(&rport->dev_loss_work))
2560 fc_flush_devloss(shost);
2561 cancel_work_sync(&rport->scan_work);
2562 spin_lock_irqsave(shost->host_lock, flags);
2563 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
2564 }
2565 spin_unlock_irqrestore(shost->host_lock, flags);
2566
2567
2568 if (rport->scsi_target_id != -1)
2569 fc_starget_delete(&rport->stgt_delete_work);
2570
2571
2572
2573
2574
2575
2576
2577
2578 spin_lock_irqsave(shost->host_lock, flags);
2579 if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
2580 (i->f->dev_loss_tmo_callbk)) {
2581 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
2582 do_callback = 1;
2583 }
2584 spin_unlock_irqrestore(shost->host_lock, flags);
2585
2586 if (do_callback)
2587 i->f->dev_loss_tmo_callbk(rport);
2588
2589 fc_bsg_remove(rport->rqst_q);
2590
2591 transport_remove_device(dev);
2592 device_del(dev);
2593 transport_destroy_device(dev);
2594 scsi_host_put(shost);
2595 put_device(dev);
2596}
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
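/**
 * fc_rport_create - allocate and register a new FC remote port
 * @shost:	SCSI host the remote port was discovered on
 * @channel:	channel the remote port is attached to
 * @ids:	remote port identifiers (node/port names, port id, roles)
 *
 * Allocates the fc_rport plus any LLDD private data, assigns a scsi
 * target id when the port has an FCP target (or dummy initiator) role,
 * registers the "rport-<host>:<channel>-<number>" device, sets up the bsg
 * queue, and schedules a scsi scan for target-capable ports.  Returns the
 * new rport, or NULL on failure.
 */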
2612static struct fc_rport *
2613fc_rport_create(struct Scsi_Host *shost, int channel,
2614 struct fc_rport_identifiers *ids)
2615{
2616 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2617 struct fc_internal *fci = to_fc_internal(shost->transportt);
2618 struct fc_rport *rport;
2619 struct device *dev;
2620 unsigned long flags;
2621 int error;
2622 size_t size;
2623
2624 size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
2625 rport = kzalloc(size, GFP_KERNEL);
2626 if (unlikely(!rport)) {
2627 printk(KERN_ERR "%s: allocation failure\n", __func__);
2628 return NULL;
2629 }
2630
2631 rport->maxframe_size = -1;
2632 rport->supported_classes = FC_COS_UNSPECIFIED;
2633 rport->dev_loss_tmo = fc_host->dev_loss_tmo;
2634 memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
2635 memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
2636 rport->port_id = ids->port_id;
2637 rport->roles = ids->roles;
2638 rport->port_state = FC_PORTSTATE_ONLINE;
2639 if (fci->f->dd_fcrport_size)
2640 rport->dd_data = &rport[1];
2641 rport->channel = channel;
2642 rport->fast_io_fail_tmo = -1;
2643
2644 INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
2645 INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
2646 INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
2647 INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
2648 INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);
2649
2650 spin_lock_irqsave(shost->host_lock, flags);
2651
2652 rport->number = fc_host->next_rport_number++;
2653 if ((rport->roles & FC_PORT_ROLE_FCP_TARGET) ||
2654 (rport->roles & FC_PORT_ROLE_FCP_DUMMY_INITIATOR))
2655 rport->scsi_target_id = fc_host->next_target_id++;
2656 else
2657 rport->scsi_target_id = -1;
2658 list_add_tail(&rport->peers, &fc_host->rports);
2659 scsi_host_get(shost);
2660
2661 spin_unlock_irqrestore(shost->host_lock, flags);
2662
2663 dev = &rport->dev;
2664 device_initialize(dev);
2665 dev->parent = get_device(&shost->shost_gendev);
2666 dev->release = fc_rport_dev_release;
2667 dev_set_name(dev, "rport-%d:%d-%d",
2668 shost->host_no, channel, rport->number);
2669 transport_setup_device(dev);
2670
2671 error = device_add(dev);
2672 if (error) {
2673 printk(KERN_ERR "FC Remote Port device_add failed\n");
2674 goto delete_rport;
2675 }
2676 transport_add_device(dev);
2677 transport_configure_device(dev);
2678
2679 fc_bsg_rportadd(shost, rport);
2680
2681
2682 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2683
2684 rport->flags |= FC_RPORT_SCAN_PENDING;
2685 scsi_queue_work(shost, &rport->scan_work);
2686 }
2687
2688 return rport;
2689
2690delete_rport:
2691 transport_destroy_device(dev);
2692 spin_lock_irqsave(shost->host_lock, flags);
2693 list_del(&rport->peers);
2694 scsi_host_put(shost);
2695 spin_unlock_irqrestore(shost->host_lock, flags);
2696 put_device(dev->parent);
2697 kfree(rport);
2698 return NULL;
2699}
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
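/**
 * fc_remote_port_add - notify the transport of a new FC remote port
 * @shost:	SCSI host the remote port was discovered on
 * @channel:	channel on which the port was discovered
 * @ids:	identifiers of the remote port
 *
 * Before allocating anything new, the transport tries to reuse an existing
 * rport: first a blocked rport on the same channel whose identifiers match
 * according to the host's tgtid_bind_type, then a saved binding on the
 * rport_bindings list.  A reused rport has its identifiers refreshed and,
 * where it exposes an FCP target, its scsi target is unblocked and
 * rescanned.  Only when no match is found is a new rport created via
 * fc_rport_create().
 */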
2739struct fc_rport *
2740fc_remote_port_add(struct Scsi_Host *shost, int channel,
2741 struct fc_rport_identifiers *ids)
2742{
2743 struct fc_internal *fci = to_fc_internal(shost->transportt);
2744 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2745 struct fc_rport *rport;
2746 unsigned long flags;
2747 int match = 0;
2748
2749
2750 fc_flush_work(shost);
2751
2752
2753
2754
2755
2756
2757 spin_lock_irqsave(shost->host_lock, flags);
2758
2759 list_for_each_entry(rport, &fc_host->rports, peers) {
2760
2761 if ((rport->port_state == FC_PORTSTATE_BLOCKED) &&
2762 (rport->channel == channel)) {
2763
2764 switch (fc_host->tgtid_bind_type) {
2765 case FC_TGTID_BIND_BY_WWPN:
2766 case FC_TGTID_BIND_NONE:
2767 if (rport->port_name == ids->port_name)
2768 match = 1;
2769 break;
2770 case FC_TGTID_BIND_BY_WWNN:
2771 if (rport->node_name == ids->node_name)
2772 match = 1;
2773 break;
2774 case FC_TGTID_BIND_BY_ID:
2775 if (rport->port_id == ids->port_id)
2776 match = 1;
2777 break;
2778 }
2779
2780 if (match) {
2781
2782 memcpy(&rport->node_name, &ids->node_name,
2783 sizeof(rport->node_name));
2784 memcpy(&rport->port_name, &ids->port_name,
2785 sizeof(rport->port_name));
2786 rport->port_id = ids->port_id;
2787
2788 rport->port_state = FC_PORTSTATE_ONLINE;
2789 rport->roles = ids->roles;
2790
2791 spin_unlock_irqrestore(shost->host_lock, flags);
2792
2793 if (fci->f->dd_fcrport_size)
2794 memset(rport->dd_data, 0,
2795 fci->f->dd_fcrport_size);
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814 if ((rport->scsi_target_id != -1) &&
2815 (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
2816 return rport;
2817
2818
2819
2820
2821
2822
2823 if (!cancel_delayed_work(&rport->fail_io_work))
2824 fc_flush_devloss(shost);
2825 if (!cancel_delayed_work(&rport->dev_loss_work))
2826 fc_flush_devloss(shost);
2827
2828 spin_lock_irqsave(shost->host_lock, flags);
2829
2830 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
2831 FC_RPORT_DEVLOSS_PENDING |
2832 FC_RPORT_DEVLOSS_CALLBK_DONE);
2833
2834 spin_unlock_irqrestore(shost->host_lock, flags);
2835
2836
2837 if (rport->scsi_target_id != -1) {
2838 scsi_target_unblock(&rport->dev,
2839 SDEV_RUNNING);
2840 spin_lock_irqsave(shost->host_lock,
2841 flags);
2842 rport->flags |= FC_RPORT_SCAN_PENDING;
2843 scsi_queue_work(shost,
2844 &rport->scan_work);
2845 spin_unlock_irqrestore(shost->host_lock,
2846 flags);
2847 }
2848
2849 fc_bsg_goose_queue(rport);
2850
2851 return rport;
2852 }
2853 }
2854 }
2855
2856
2857
2858
2859
2860 if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {
2861
2862
2863
2864 list_for_each_entry(rport, &fc_host->rport_bindings,
2865 peers) {
2866 if (rport->channel != channel)
2867 continue;
2868
2869 switch (fc_host->tgtid_bind_type) {
2870 case FC_TGTID_BIND_BY_WWPN:
2871 if (rport->port_name == ids->port_name)
2872 match = 1;
2873 break;
2874 case FC_TGTID_BIND_BY_WWNN:
2875 if (rport->node_name == ids->node_name)
2876 match = 1;
2877 break;
2878 case FC_TGTID_BIND_BY_ID:
2879 if (rport->port_id == ids->port_id)
2880 match = 1;
2881 break;
2882 case FC_TGTID_BIND_NONE:
2883 break;
2884 }
2885
2886 if (match) {
2887 list_move_tail(&rport->peers, &fc_host->rports);
2888 break;
2889 }
2890 }
2891
2892 if (match) {
2893 memcpy(&rport->node_name, &ids->node_name,
2894 sizeof(rport->node_name));
2895 memcpy(&rport->port_name, &ids->port_name,
2896 sizeof(rport->port_name));
2897 rport->port_id = ids->port_id;
2898 rport->roles = ids->roles;
2899 rport->port_state = FC_PORTSTATE_ONLINE;
2900 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
2901
2902 if (fci->f->dd_fcrport_size)
2903 memset(rport->dd_data, 0,
2904 fci->f->dd_fcrport_size);
2905 spin_unlock_irqrestore(shost->host_lock, flags);
2906
2907 if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
2908 scsi_target_unblock(&rport->dev, SDEV_RUNNING);
2909
2910
2911 spin_lock_irqsave(shost->host_lock, flags);
2912 rport->flags |= FC_RPORT_SCAN_PENDING;
2913 scsi_queue_work(shost, &rport->scan_work);
2914 spin_unlock_irqrestore(shost->host_lock, flags);
2915 }
2916 return rport;
2917 }
2918 }
2919
2920 spin_unlock_irqrestore(shost->host_lock, flags);
2921
2922
2923 rport = fc_rport_create(shost, channel, ids);
2924
2925 return rport;
2926}
2927EXPORT_SYMBOL(fc_remote_port_add);
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
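/**
 * fc_remote_port_delete - notify the transport that a remote port is gone
 * @rport:	The remote port that was lost
 *
 * Rather than tearing the rport down immediately, the port is marked
 * FC_PORTSTATE_BLOCKED and its scsi target is blocked, then the
 * fast_io_fail timer (if configured shorter than dev_loss_tmo) and the
 * dev_loss_tmo timer are started.  If the port does not return before
 * dev_loss_tmo expires, fc_timeout_deleted_rport() finishes the removal.
 */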
2980void
2981fc_remote_port_delete(struct fc_rport *rport)
2982{
2983 struct Scsi_Host *shost = rport_to_shost(rport);
2984 unsigned long timeout = rport->dev_loss_tmo;
2985 unsigned long flags;
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995 spin_lock_irqsave(shost->host_lock, flags);
2996
2997 if (rport->port_state != FC_PORTSTATE_ONLINE) {
2998 spin_unlock_irqrestore(shost->host_lock, flags);
2999 return;
3000 }
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015 rport->port_state = FC_PORTSTATE_BLOCKED;
3016
3017 rport->flags |= FC_RPORT_DEVLOSS_PENDING;
3018
3019 spin_unlock_irqrestore(shost->host_lock, flags);
3020
3021 if (rport->roles & FC_PORT_ROLE_FCP_INITIATOR &&
3022 shost->active_mode & MODE_TARGET)
3023 fc_tgt_it_nexus_destroy(shost, (unsigned long)rport);
3024
3025 scsi_target_block(&rport->dev);
3026
3027
3028 if ((rport->fast_io_fail_tmo != -1) &&
3029 (rport->fast_io_fail_tmo < timeout))
3030 fc_queue_devloss_work(shost, &rport->fail_io_work,
3031 rport->fast_io_fail_tmo * HZ);
3032
3033
3034 fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
3035}
3036EXPORT_SYMBOL(fc_remote_port_delete);
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
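/**
 * fc_remote_port_rolechg - notify the transport that rport roles changed
 * @rport:	The remote port whose roles are changing
 * @roles:	New role bitmask for the remote port
 *
 * When the port gains the FCP target role, a scsi target id is assigned
 * if needed, any pending devloss/fast-fail timers are cancelled, the scsi
 * target is unblocked and a scan is scheduled.  When the new roles do not
 * include FCP target and the host runs in target mode, an IT nexus is
 * created instead.
 */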
3058void
3059fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
3060{
3061 struct Scsi_Host *shost = rport_to_shost(rport);
3062 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3063 unsigned long flags;
3064 int create = 0;
3065 int ret;
3066
3067 spin_lock_irqsave(shost->host_lock, flags);
3068 if (roles & FC_PORT_ROLE_FCP_TARGET) {
3069 if (rport->scsi_target_id == -1) {
3070 rport->scsi_target_id = fc_host->next_target_id++;
3071 create = 1;
3072 } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
3073 create = 1;
3074 } else if (shost->active_mode & MODE_TARGET) {
3075 ret = fc_tgt_it_nexus_create(shost, (unsigned long)rport,
3076 (char *)&rport->node_name);
3077 if (ret)
3078			printk(KERN_ERR "FC Remote Port tgt nexus failed %d\n",
3079 ret);
3080 }
3081
3082 rport->roles = roles;
3083
3084 spin_unlock_irqrestore(shost->host_lock, flags);
3085
3086 if (create) {
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099 if (!cancel_delayed_work(&rport->fail_io_work))
3100 fc_flush_devloss(shost);
3101 if (!cancel_delayed_work(&rport->dev_loss_work))
3102 fc_flush_devloss(shost);
3103
3104 spin_lock_irqsave(shost->host_lock, flags);
3105 rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
3106 FC_RPORT_DEVLOSS_PENDING |
3107 FC_RPORT_DEVLOSS_CALLBK_DONE);
3108 spin_unlock_irqrestore(shost->host_lock, flags);
3109
3110
3111 fc_flush_work(shost);
3112
3113 scsi_target_unblock(&rport->dev, SDEV_RUNNING);
3114
3115 spin_lock_irqsave(shost->host_lock, flags);
3116 rport->flags |= FC_RPORT_SCAN_PENDING;
3117 scsi_queue_work(shost, &rport->scan_work);
3118 spin_unlock_irqrestore(shost->host_lock, flags);
3119 }
3120}
3121EXPORT_SYMBOL(fc_remote_port_rolechg);
3122
3123
3124
3125
3126
3127
3128
3129
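/**
 * fc_timeout_deleted_rport - dev_loss_tmo expiration handler
 * @work:	The rport's dev_loss_work
 *
 * Runs when a blocked remote port has not reappeared within dev_loss_tmo.
 * Depending on the binding policy the rport is either deleted outright or
 * moved to the rport_bindings list in the NOTPRESENT state so its scsi
 * target id binding survives; in the latter case the stale identifiers are
 * cleared, outstanding I/O is terminated, removal of the scsi target is
 * queued and the LLDD dev_loss_tmo callback is invoked.
 */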
3130static void
3131fc_timeout_deleted_rport(struct work_struct *work)
3132{
3133 struct fc_rport *rport =
3134 container_of(work, struct fc_rport, dev_loss_work.work);
3135 struct Scsi_Host *shost = rport_to_shost(rport);
3136 struct fc_internal *i = to_fc_internal(shost->transportt);
3137 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3138 unsigned long flags;
3139 int do_callback = 0;
3140
3141 spin_lock_irqsave(shost->host_lock, flags);
3142
3143 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
3144
3145
3146
3147
3148
3149
3150 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3151 (rport->scsi_target_id != -1) &&
3152 !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
3153 dev_printk(KERN_ERR, &rport->dev,
3154 "blocked FC remote port time out: no longer"
3155		    " an FCP target, removing starget\n");
3156 spin_unlock_irqrestore(shost->host_lock, flags);
3157 scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
3158 fc_queue_work(shost, &rport->stgt_delete_work);
3159 return;
3160 }
3161
3162
3163 if (rport->port_state != FC_PORTSTATE_BLOCKED) {
3164 spin_unlock_irqrestore(shost->host_lock, flags);
3165 dev_printk(KERN_ERR, &rport->dev,
3166 "blocked FC remote port time out: leaving"
3167 " rport%s alone\n",
3168 (rport->scsi_target_id != -1) ? " and starget" : "");
3169 return;
3170 }
3171
3172 if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
3173 (rport->scsi_target_id == -1)) {
3174 list_del(&rport->peers);
3175 rport->port_state = FC_PORTSTATE_DELETED;
3176 dev_printk(KERN_ERR, &rport->dev,
3177 "blocked FC remote port time out: removing"
3178 " rport%s\n",
3179 (rport->scsi_target_id != -1) ? " and starget" : "");
3180 fc_queue_work(shost, &rport->rport_delete_work);
3181 spin_unlock_irqrestore(shost->host_lock, flags);
3182 return;
3183 }
3184
3185 dev_printk(KERN_ERR, &rport->dev,
3186 "blocked FC remote port time out: removing target and "
3187 "saving binding\n");
3188
3189 list_move_tail(&rport->peers, &fc_host->rport_bindings);
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200 rport->maxframe_size = -1;
3201 rport->supported_classes = FC_COS_UNSPECIFIED;
3202 rport->roles = FC_PORT_ROLE_UNKNOWN;
3203 rport->port_state = FC_PORTSTATE_NOTPRESENT;
3204 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
3205
3206
3207
3208
3209
3210
3211 spin_unlock_irqrestore(shost->host_lock, flags);
3212 fc_terminate_rport_io(rport);
3213
3214 spin_lock_irqsave(shost->host_lock, flags);
3215
3216 if (rport->port_state == FC_PORTSTATE_NOTPRESENT) {
3217
3218
3219 switch (fc_host->tgtid_bind_type) {
3220 case FC_TGTID_BIND_BY_WWPN:
3221 rport->node_name = -1;
3222 rport->port_id = -1;
3223 break;
3224 case FC_TGTID_BIND_BY_WWNN:
3225 rport->port_name = -1;
3226 rport->port_id = -1;
3227 break;
3228 case FC_TGTID_BIND_BY_ID:
3229 rport->node_name = -1;
3230 rport->port_name = -1;
3231 break;
3232 case FC_TGTID_BIND_NONE:
3233 break;
3234 }
3235
3236
3237
3238
3239
3240
3241 rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
3242 fc_queue_work(shost, &rport->stgt_delete_work);
3243
3244 do_callback = 1;
3245 }
3246
3247 spin_unlock_irqrestore(shost->host_lock, flags);
3248
3249
3250
3251
3252
3253
3254
3255 if (do_callback && i->f->dev_loss_tmo_callbk)
3256 i->f->dev_loss_tmo_callbk(rport);
3257}
3258
3259
3260
3261
3262
3263
3264
3265
3266
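/**
 * fc_timeout_fail_rport_io - fast_io_fail_tmo expiration handler
 * @work:	The rport's fail_io_work
 *
 * If the remote port is still blocked when the fast fail timer fires, mark
 * it FC_RPORT_FAST_FAIL_TIMEDOUT and terminate its outstanding I/O so
 * commands fail back quickly instead of waiting out dev_loss_tmo.
 */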
3267static void
3268fc_timeout_fail_rport_io(struct work_struct *work)
3269{
3270 struct fc_rport *rport =
3271 container_of(work, struct fc_rport, fail_io_work.work);
3272
3273 if (rport->port_state != FC_PORTSTATE_BLOCKED)
3274 return;
3275
3276 rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
3277 fc_terminate_rport_io(rport);
3278}
3279
3280
3281
3282
3283
3284static void
3285fc_scsi_scan_rport(struct work_struct *work)
3286{
3287 struct fc_rport *rport =
3288 container_of(work, struct fc_rport, scan_work);
3289 struct Scsi_Host *shost = rport_to_shost(rport);
3290 struct fc_internal *i = to_fc_internal(shost->transportt);
3291 unsigned long flags;
3292
3293 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
3294 (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
3295 !(i->f->disable_target_scan)) {
3296 scsi_scan_target(&rport->dev, rport->channel,
3297 rport->scsi_target_id, SCAN_WILD_CARD,
3298 SCSI_SCAN_RESCAN);
3299 }
3300
3301 spin_lock_irqsave(shost->host_lock, flags);
3302 rport->flags &= ~FC_RPORT_SCAN_PENDING;
3303 spin_unlock_irqrestore(shost->host_lock, flags);
3304}
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
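/**
 * fc_block_scsi_eh - block SCSI error handling while an rport is blocked
 * @cmnd:	SCSI command under error recovery
 *
 * Polls (sleeping one second per iteration) until the command's remote
 * port leaves the BLOCKED state or fast_io_fail_tmo fires.  Returns
 * FAST_IO_FAIL if the fast fail timeout fired, otherwise 0.  Exported for
 * use by LLDD error handlers.
 */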
3320int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
3321{
3322 struct Scsi_Host *shost = cmnd->device->host;
3323 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
3324 unsigned long flags;
3325
3326 spin_lock_irqsave(shost->host_lock, flags);
3327 while (rport->port_state == FC_PORTSTATE_BLOCKED &&
3328 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
3329 spin_unlock_irqrestore(shost->host_lock, flags);
3330 msleep(1000);
3331 spin_lock_irqsave(shost->host_lock, flags);
3332 }
3333 spin_unlock_irqrestore(shost->host_lock, flags);
3334
3335 if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
3336 return FAST_IO_FAIL;
3337
3338 return 0;
3339}
3340EXPORT_SYMBOL(fc_block_scsi_eh);
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
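/**
 * fc_vport_setup - allocate and register an FC virtual port
 * @shost:	physical host the virtual port is attached to
 * @channel:	channel the vport will use
 * @pdev:	parent device for the vport's device node
 * @ids:	identifiers, type and roles of the vport
 * @ret_vport:	receives the new vport on success
 *
 * Fails with -ENOENT if the LLDD has no vport_create handler and -ENOSPC
 * if the host is already at max_npiv_vports.  Otherwise registers the
 * "vport-<host>:<channel>-<number>" device, calls the LLDD's vport_create,
 * and adds a sysfs link under the shost when the parent is not the shost
 * itself.
 */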
3357static int
3358fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
3359 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
3360{
3361 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3362 struct fc_internal *fci = to_fc_internal(shost->transportt);
3363 struct fc_vport *vport;
3364 struct device *dev;
3365 unsigned long flags;
3366 size_t size;
3367 int error;
3368
3369 *ret_vport = NULL;
3370
3371 if ( ! fci->f->vport_create)
3372 return -ENOENT;
3373
3374 size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
3375 vport = kzalloc(size, GFP_KERNEL);
3376 if (unlikely(!vport)) {
3377 printk(KERN_ERR "%s: allocation failure\n", __func__);
3378 return -ENOMEM;
3379 }
3380
3381 vport->vport_state = FC_VPORT_UNKNOWN;
3382 vport->vport_last_state = FC_VPORT_UNKNOWN;
3383 vport->node_name = ids->node_name;
3384 vport->port_name = ids->port_name;
3385 vport->roles = ids->roles;
3386 vport->vport_type = ids->vport_type;
3387 if (fci->f->dd_fcvport_size)
3388 vport->dd_data = &vport[1];
3389 vport->shost = shost;
3390 vport->channel = channel;
3391 vport->flags = FC_VPORT_CREATING;
3392 INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
3393
3394 spin_lock_irqsave(shost->host_lock, flags);
3395
3396 if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
3397 spin_unlock_irqrestore(shost->host_lock, flags);
3398 kfree(vport);
3399 return -ENOSPC;
3400 }
3401 fc_host->npiv_vports_inuse++;
3402 vport->number = fc_host->next_vport_number++;
3403 list_add_tail(&vport->peers, &fc_host->vports);
3404 scsi_host_get(shost);
3405
3406 spin_unlock_irqrestore(shost->host_lock, flags);
3407
3408 dev = &vport->dev;
3409 device_initialize(dev);
3410 dev->parent = get_device(pdev);
3411 dev->release = fc_vport_dev_release;
3412 dev_set_name(dev, "vport-%d:%d-%d",
3413 shost->host_no, channel, vport->number);
3414 transport_setup_device(dev);
3415
3416 error = device_add(dev);
3417 if (error) {
3418 printk(KERN_ERR "FC Virtual Port device_add failed\n");
3419 goto delete_vport;
3420 }
3421 transport_add_device(dev);
3422 transport_configure_device(dev);
3423
3424 error = fci->f->vport_create(vport, ids->disable);
3425 if (error) {
3426 printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
3427 goto delete_vport_all;
3428 }
3429
3430
3431
3432
3433
3434 if (pdev != &shost->shost_gendev) {
3435 error = sysfs_create_link(&shost->shost_gendev.kobj,
3436 &dev->kobj, dev_name(dev));
3437 if (error)
3438 printk(KERN_ERR
3439 "%s: Cannot create vport symlinks for "
3440 "%s, err=%d\n",
3441 __func__, dev_name(dev), error);
3442 }
3443 spin_lock_irqsave(shost->host_lock, flags);
3444 vport->flags &= ~FC_VPORT_CREATING;
3445 spin_unlock_irqrestore(shost->host_lock, flags);
3446
3447 dev_printk(KERN_NOTICE, pdev,
3448 "%s created via shost%d channel %d\n", dev_name(dev),
3449 shost->host_no, channel);
3450
3451 *ret_vport = vport;
3452
3453 return 0;
3454
3455delete_vport_all:
3456 transport_remove_device(dev);
3457 device_del(dev);
3458delete_vport:
3459 transport_destroy_device(dev);
3460 spin_lock_irqsave(shost->host_lock, flags);
3461 list_del(&vport->peers);
3462 scsi_host_put(shost);
3463 fc_host->npiv_vports_inuse--;
3464 spin_unlock_irqrestore(shost->host_lock, flags);
3465 put_device(dev->parent);
3466 kfree(vport);
3467
3468 return error;
3469}
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481struct fc_vport *
3482fc_vport_create(struct Scsi_Host *shost, int channel,
3483 struct fc_vport_identifiers *ids)
3484{
3485 int stat;
3486 struct fc_vport *vport;
3487
3488 stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
3489 ids, &vport);
3490 return stat ? NULL : vport;
3491}
3492EXPORT_SYMBOL(fc_vport_create);
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
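/**
 * fc_vport_terminate - tear down a virtual port
 * @vport:	virtual port to remove
 *
 * Calls the LLDD's vport_delete handler; on success removes the vport from
 * the host's list, decrements npiv_vports_inuse, removes the sysfs link
 * and device, and drops the final reference.  Returns the LLDD status, or
 * -ENOENT if no vport_delete handler exists.
 */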
3504int
3505fc_vport_terminate(struct fc_vport *vport)
3506{
3507 struct Scsi_Host *shost = vport_to_shost(vport);
3508 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3509 struct fc_internal *i = to_fc_internal(shost->transportt);
3510 struct device *dev = &vport->dev;
3511 unsigned long flags;
3512 int stat;
3513
3514 if (i->f->vport_delete)
3515 stat = i->f->vport_delete(vport);
3516 else
3517 stat = -ENOENT;
3518
3519 spin_lock_irqsave(shost->host_lock, flags);
3520 vport->flags &= ~FC_VPORT_DELETING;
3521 if (!stat) {
3522 vport->flags |= FC_VPORT_DELETED;
3523 list_del(&vport->peers);
3524 fc_host->npiv_vports_inuse--;
3525 scsi_host_put(shost);
3526 }
3527 spin_unlock_irqrestore(shost->host_lock, flags);
3528
3529 if (stat)
3530 return stat;
3531
3532 if (dev->parent != &shost->shost_gendev)
3533 sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
3534 transport_remove_device(dev);
3535 device_del(dev);
3536 transport_destroy_device(dev);
3537
3538
3539
3540
3541
3542
3543 put_device(dev);
3544
3545 return 0;
3546}
3547EXPORT_SYMBOL(fc_vport_terminate);
3548
3549
3550
3551
3552
3553static void
3554fc_vport_sched_delete(struct work_struct *work)
3555{
3556 struct fc_vport *vport =
3557 container_of(work, struct fc_vport, vport_delete_work);
3558 int stat;
3559
3560 stat = fc_vport_terminate(vport);
3561 if (stat)
3562 dev_printk(KERN_ERR, vport->dev.parent,
3563			"%s: %s could not be deleted (created via "
3564			"shost%d channel %d) - error %d\n", __func__,
3565 dev_name(&vport->dev), vport->shost->host_no,
3566 vport->channel, stat);
3567}
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579static void
3580fc_destroy_bsgjob(struct fc_bsg_job *job)
3581{
3582 unsigned long flags;
3583
3584 spin_lock_irqsave(&job->job_lock, flags);
3585 if (job->ref_cnt) {
3586 spin_unlock_irqrestore(&job->job_lock, flags);
3587 return;
3588 }
3589 spin_unlock_irqrestore(&job->job_lock, flags);
3590
3591 put_device(job->dev);
3592
3593 kfree(job->request_payload.sg_list);
3594 kfree(job->reply_payload.sg_list);
3595 kfree(job);
3596}
3597
3598
3599
3600
3601
3602
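/**
 * fc_bsg_jobdone - completion routine for fc bsg requests
 * @job:	fc_bsg_job that has completed
 *
 * Propagates the reply result into the request, sets the returned sense
 * length (just the result word on error, the full reply otherwise), fixes
 * up the residual for bidirectional requests, and completes the block
 * layer request.
 */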
3603static void
3604fc_bsg_jobdone(struct fc_bsg_job *job)
3605{
3606 struct request *req = job->req;
3607 struct request *rsp = req->next_rq;
3608 int err;
3609
3610 err = job->req->errors = job->reply->result;
3611
3612 if (err < 0)
3613
3614 job->req->sense_len = sizeof(uint32_t);
3615 else
3616 job->req->sense_len = job->reply_len;
3617
3618
3619 req->resid_len = 0;
3620
3621 if (rsp) {
3622 WARN_ON(job->reply->reply_payload_rcv_len > rsp->resid_len);
3623
3624
3625 rsp->resid_len -= min(job->reply->reply_payload_rcv_len,
3626 rsp->resid_len);
3627 }
3628 blk_complete_request(req);
3629}
3630
3631
3632
3633
3634
3635static void fc_bsg_softirq_done(struct request *rq)
3636{
3637 struct fc_bsg_job *job = rq->special;
3638 unsigned long flags;
3639
3640 spin_lock_irqsave(&job->job_lock, flags);
3641 job->state_flags |= FC_RQST_STATE_DONE;
3642 job->ref_cnt--;
3643 spin_unlock_irqrestore(&job->job_lock, flags);
3644
3645 blk_end_request_all(rq, rq->errors);
3646 fc_destroy_bsgjob(job);
3647}
3648
3649
3650
3651
3652
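/**
 * fc_bsg_job_timeout - block layer timeout handler for fc bsg requests
 * @req:	The bsg request that timed out
 *
 * If the job's rport is blocked, simply restart the timer.  Otherwise give
 * the LLDD's bsg_timeout handler a chance to abort the job; -EAGAIN from
 * the LLDD also restarts the timer.  Returns BLK_EH_NOT_HANDLED when the
 * job already completed, BLK_EH_HANDLED otherwise.
 */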
3653static enum blk_eh_timer_return
3654fc_bsg_job_timeout(struct request *req)
3655{
3656 struct fc_bsg_job *job = (void *) req->special;
3657 struct Scsi_Host *shost = job->shost;
3658 struct fc_internal *i = to_fc_internal(shost->transportt);
3659 unsigned long flags;
3660 int err = 0, done = 0;
3661
3662 if (job->rport && job->rport->port_state == FC_PORTSTATE_BLOCKED)
3663 return BLK_EH_RESET_TIMER;
3664
3665 spin_lock_irqsave(&job->job_lock, flags);
3666 if (job->state_flags & FC_RQST_STATE_DONE)
3667 done = 1;
3668 else
3669 job->ref_cnt++;
3670 spin_unlock_irqrestore(&job->job_lock, flags);
3671
3672 if (!done && i->f->bsg_timeout) {
3673
3674 err = i->f->bsg_timeout(job);
3675 if (err == -EAGAIN) {
3676 job->ref_cnt--;
3677 return BLK_EH_RESET_TIMER;
3678 } else if (err)
3679 printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
3680 "abort failed with status %d\n", err);
3681 }
3682
3683
3684 if (done)
3685 return BLK_EH_NOT_HANDLED;
3686 else
3687 return BLK_EH_HANDLED;
3688}
3689
3690static int
3691fc_bsg_map_buffer(struct fc_bsg_buffer *buf, struct request *req)
3692{
3693 size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);
3694
3695 BUG_ON(!req->nr_phys_segments);
3696
3697 buf->sg_list = kzalloc(sz, GFP_KERNEL);
3698 if (!buf->sg_list)
3699 return -ENOMEM;
3700 sg_init_table(buf->sg_list, req->nr_phys_segments);
3701 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
3702 buf->payload_len = blk_rq_bytes(req);
3703 return 0;
3704}
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714static int
3715fc_req_to_bsgjob(struct Scsi_Host *shost, struct fc_rport *rport,
3716 struct request *req)
3717{
3718 struct fc_internal *i = to_fc_internal(shost->transportt);
3719 struct request *rsp = req->next_rq;
3720 struct fc_bsg_job *job;
3721 int ret;
3722
3723 BUG_ON(req->special);
3724
3725 job = kzalloc(sizeof(struct fc_bsg_job) + i->f->dd_bsg_size,
3726 GFP_KERNEL);
3727 if (!job)
3728 return -ENOMEM;
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739 req->special = job;
3740 job->shost = shost;
3741 job->rport = rport;
3742 job->req = req;
3743 if (i->f->dd_bsg_size)
3744 job->dd_data = (void *)&job[1];
3745 spin_lock_init(&job->job_lock);
3746 job->request = (struct fc_bsg_request *)req->cmd;
3747 job->request_len = req->cmd_len;
3748 job->reply = req->sense;
3749 job->reply_len = SCSI_SENSE_BUFFERSIZE;
3750
3751 if (req->bio) {
3752 ret = fc_bsg_map_buffer(&job->request_payload, req);
3753 if (ret)
3754 goto failjob_rls_job;
3755 }
3756 if (rsp && rsp->bio) {
3757 ret = fc_bsg_map_buffer(&job->reply_payload, rsp);
3758 if (ret)
3759 goto failjob_rls_rqst_payload;
3760 }
3761 job->job_done = fc_bsg_jobdone;
3762 if (rport)
3763 job->dev = &rport->dev;
3764 else
3765 job->dev = &shost->shost_gendev;
3766 get_device(job->dev);
3767
3768 job->ref_cnt = 1;
3769
3770 return 0;
3771
3772
3773failjob_rls_rqst_payload:
3774 kfree(job->request_payload.sg_list);
3775failjob_rls_job:
3776 kfree(job);
3777 return -ENOMEM;
3778}
3779
3780
3781enum fc_dispatch_result {
3782 FC_DISPATCH_BREAK,
3783 FC_DISPATCH_LOCKED,
3784 FC_DISPATCH_UNLOCKED,
3785};
3786
3787
3788
3789
3790
3791
3792
3793
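/*
 * fc_bsg_host_dispatch - validate a host-directed bsg request and pass it
 * to the LLDD.
 *
 * Checks the message code, bidirectional payloads (for ELS/CT), vendor id
 * (for vendor-unique requests) and minimum request length before calling
 * the LLDD's bsg_request handler.  On validation failure the job is
 * completed immediately with the error in the reply.
 */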
3794static enum fc_dispatch_result
3795fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3796 struct fc_bsg_job *job)
3797{
3798 struct fc_internal *i = to_fc_internal(shost->transportt);
3799 int cmdlen = sizeof(uint32_t);
3800 int ret;
3801
3802
3803 switch (job->request->msgcode) {
3804 case FC_BSG_HST_ADD_RPORT:
3805 cmdlen += sizeof(struct fc_bsg_host_add_rport);
3806 break;
3807
3808 case FC_BSG_HST_DEL_RPORT:
3809 cmdlen += sizeof(struct fc_bsg_host_del_rport);
3810 break;
3811
3812 case FC_BSG_HST_ELS_NOLOGIN:
3813 cmdlen += sizeof(struct fc_bsg_host_els);
3814
3815 if ((!job->request_payload.payload_len) ||
3816 (!job->reply_payload.payload_len)) {
3817 ret = -EINVAL;
3818 goto fail_host_msg;
3819 }
3820 break;
3821
3822 case FC_BSG_HST_CT:
3823 cmdlen += sizeof(struct fc_bsg_host_ct);
3824
3825 if ((!job->request_payload.payload_len) ||
3826 (!job->reply_payload.payload_len)) {
3827 ret = -EINVAL;
3828 goto fail_host_msg;
3829 }
3830 break;
3831
3832 case FC_BSG_HST_VENDOR:
3833 cmdlen += sizeof(struct fc_bsg_host_vendor);
3834 if ((shost->hostt->vendor_id == 0L) ||
3835 (job->request->rqst_data.h_vendor.vendor_id !=
3836 shost->hostt->vendor_id)) {
3837 ret = -ESRCH;
3838 goto fail_host_msg;
3839 }
3840 break;
3841
3842 default:
3843 ret = -EBADR;
3844 goto fail_host_msg;
3845 }
3846
3847
3848 if (job->request_len < cmdlen) {
3849 ret = -ENOMSG;
3850 goto fail_host_msg;
3851 }
3852
3853 ret = i->f->bsg_request(job);
3854 if (!ret)
3855 return FC_DISPATCH_UNLOCKED;
3856
3857fail_host_msg:
3858
3859 BUG_ON(job->reply_len < sizeof(uint32_t));
3860 job->reply->reply_payload_rcv_len = 0;
3861 job->reply->result = ret;
3862 job->reply_len = sizeof(uint32_t);
3863 fc_bsg_jobdone(job);
3864 return FC_DISPATCH_UNLOCKED;
3865}
3866
3867
3868
3869
3870
3871
3872static void
3873fc_bsg_goose_queue(struct fc_rport *rport)
3874{
3875 if (!rport->rqst_q)
3876 return;
3877
3878
3879
3880
3881 get_device(&rport->dev);
3882 blk_run_queue_async(rport->rqst_q);
3883 put_device(&rport->dev);
3884}
3885
3886
3887
3888
3889
3890
3891
3892
3893static enum fc_dispatch_result
3894fc_bsg_rport_dispatch(struct request_queue *q, struct Scsi_Host *shost,
3895 struct fc_rport *rport, struct fc_bsg_job *job)
3896{
3897 struct fc_internal *i = to_fc_internal(shost->transportt);
3898 int cmdlen = sizeof(uint32_t);
3899 int ret;
3900
3901
3902 switch (job->request->msgcode) {
3903 case FC_BSG_RPT_ELS:
3904 cmdlen += sizeof(struct fc_bsg_rport_els);
3905 goto check_bidi;
3906
3907 case FC_BSG_RPT_CT:
3908 cmdlen += sizeof(struct fc_bsg_rport_ct);
3909check_bidi:
3910
3911 if ((!job->request_payload.payload_len) ||
3912 (!job->reply_payload.payload_len)) {
3913 ret = -EINVAL;
3914 goto fail_rport_msg;
3915 }
3916 break;
3917 default:
3918 ret = -EBADR;
3919 goto fail_rport_msg;
3920 }
3921
3922
3923 if (job->request_len < cmdlen) {
3924 ret = -ENOMSG;
3925 goto fail_rport_msg;
3926 }
3927
3928 ret = i->f->bsg_request(job);
3929 if (!ret)
3930 return FC_DISPATCH_UNLOCKED;
3931
3932fail_rport_msg:
3933
3934 BUG_ON(job->reply_len < sizeof(uint32_t));
3935 job->reply->reply_payload_rcv_len = 0;
3936 job->reply->result = ret;
3937 job->reply_len = sizeof(uint32_t);
3938 fc_bsg_jobdone(job);
3939 return FC_DISPATCH_UNLOCKED;
3940}
3941
3942
3943
3944
3945
3946
3947
3948
3949
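/*
 * fc_bsg_request_handler - generic bsg request_fn for host and rport
 * queues.
 *
 * Pulls requests off the queue (stopping while a blocked rport waits out
 * fast_io_fail), fails requests for rports that are not online with
 * -ENXIO, wraps the rest in struct fc_bsg_job and dispatches them via
 * fc_bsg_rport_dispatch() or fc_bsg_host_dispatch().  Entered with the
 * queue lock held; the lock is dropped around the dispatch calls.
 */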
3950static void
3951fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
3952 struct fc_rport *rport, struct device *dev)
3953{
3954 struct request *req;
3955 struct fc_bsg_job *job;
3956 enum fc_dispatch_result ret;
3957
3958 if (!get_device(dev))
3959 return;
3960
3961 while (1) {
3962 if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
3963 !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
3964 break;
3965
3966 req = blk_fetch_request(q);
3967 if (!req)
3968 break;
3969
3970 if (rport && (rport->port_state != FC_PORTSTATE_ONLINE)) {
3971 req->errors = -ENXIO;
3972 spin_unlock_irq(q->queue_lock);
3973 blk_end_request_all(req, -ENXIO);
3974 spin_lock_irq(q->queue_lock);
3975 continue;
3976 }
3977
3978 spin_unlock_irq(q->queue_lock);
3979
3980 ret = fc_req_to_bsgjob(shost, rport, req);
3981 if (ret) {
3982 req->errors = ret;
3983 blk_end_request_all(req, ret);
3984 spin_lock_irq(q->queue_lock);
3985 continue;
3986 }
3987
3988 job = req->special;
3989
3990
3991 if (job->request_len < sizeof(uint32_t)) {
3992 BUG_ON(job->reply_len < sizeof(uint32_t));
3993 job->reply->reply_payload_rcv_len = 0;
3994 job->reply->result = -ENOMSG;
3995 job->reply_len = sizeof(uint32_t);
3996 fc_bsg_jobdone(job);
3997 spin_lock_irq(q->queue_lock);
3998 continue;
3999 }
4000
4001
4002 if (rport)
4003 ret = fc_bsg_rport_dispatch(q, shost, rport, job);
4004 else
4005 ret = fc_bsg_host_dispatch(q, shost, job);
4006
4007
4008 if (ret == FC_DISPATCH_BREAK)
4009 break;
4010
4011
4012 if (ret == FC_DISPATCH_UNLOCKED)
4013 spin_lock_irq(q->queue_lock);
4014 }
4015
4016 spin_unlock_irq(q->queue_lock);
4017 put_device(dev);
4018 spin_lock_irq(q->queue_lock);
4019}
4020
4021
4022
4023
4024
4025
4026static void
4027fc_bsg_host_handler(struct request_queue *q)
4028{
4029 struct Scsi_Host *shost = q->queuedata;
4030
4031 fc_bsg_request_handler(q, shost, NULL, &shost->shost_gendev);
4032}
4033
4034
4035
4036
4037
4038
4039static void
4040fc_bsg_rport_handler(struct request_queue *q)
4041{
4042 struct fc_rport *rport = q->queuedata;
4043 struct Scsi_Host *shost = rport_to_shost(rport);
4044
4045 fc_bsg_request_handler(q, shost, rport, &rport->dev);
4046}
4047
4048
4049
4050
4051
4052
4053
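/**
 * fc_bsg_hostadd - create and register the per-host bsg queue
 * @shost:	host the bsg interface is being attached to
 * @fc_host:	fc_host attribute area of @shost
 *
 * Returns -ENOTSUPP if the LLDD provides no bsg_request handler.
 * Otherwise allocates a bidirectional request queue named "fc_host%d",
 * installs the fc bsg timeout and completion handlers, and registers the
 * queue with the bsg layer.
 */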
4054static int
4055fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
4056{
4057 struct device *dev = &shost->shost_gendev;
4058 struct fc_internal *i = to_fc_internal(shost->transportt);
4059 struct request_queue *q;
4060 int err;
4061 char bsg_name[20];
4062
4063 fc_host->rqst_q = NULL;
4064
4065 if (!i->f->bsg_request)
4066 return -ENOTSUPP;
4067
4068 snprintf(bsg_name, sizeof(bsg_name),
4069 "fc_host%d", shost->host_no);
4070
4071 q = __scsi_alloc_queue(shost, fc_bsg_host_handler);
4072 if (!q) {
4073 printk(KERN_ERR "fc_host%d: bsg interface failed to "
4074 "initialize - no request queue\n",
4075 shost->host_no);
4076 return -ENOMEM;
4077 }
4078
4079 q->queuedata = shost;
4080 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
4081 blk_queue_softirq_done(q, fc_bsg_softirq_done);
4082 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
4083 blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
4084
4085 err = bsg_register_queue(q, dev, bsg_name, NULL);
4086 if (err) {
4087 printk(KERN_ERR "fc_host%d: bsg interface failed to "
4088 "initialize - register queue\n",
4089 shost->host_no);
4090 blk_cleanup_queue(q);
4091 return err;
4092 }
4093
4094 fc_host->rqst_q = q;
4095 return 0;
4096}
4097
4098
4099
4100
4101
4102
4103
4104static int
4105fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
4106{
4107 struct device *dev = &rport->dev;
4108 struct fc_internal *i = to_fc_internal(shost->transportt);
4109 struct request_queue *q;
4110 int err;
4111
4112 rport->rqst_q = NULL;
4113
4114 if (!i->f->bsg_request)
4115 return -ENOTSUPP;
4116
4117 q = __scsi_alloc_queue(shost, fc_bsg_rport_handler);
4118 if (!q) {
4119 printk(KERN_ERR "%s: bsg interface failed to "
4120 "initialize - no request queue\n",
4121 dev->kobj.name);
4122 return -ENOMEM;
4123 }
4124
4125 q->queuedata = rport;
4126 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
4127 blk_queue_softirq_done(q, fc_bsg_softirq_done);
4128 blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
4129 blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
4130
4131 err = bsg_register_queue(q, dev, NULL, NULL);
4132 if (err) {
4133 printk(KERN_ERR "%s: bsg interface failed to "
4134 "initialize - register queue\n",
4135 dev->kobj.name);
4136 blk_cleanup_queue(q);
4137 return err;
4138 }
4139
4140 rport->rqst_q = q;
4141 return 0;
4142}
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154static void
4155fc_bsg_remove(struct request_queue *q)
4156{
4157 if (q) {
4158 bsg_unregister_queue(q);
4159 blk_cleanup_queue(q);
4160 }
4161}
4162
4163
4164
4165MODULE_AUTHOR("James Smart");
4166MODULE_DESCRIPTION("FC Transport Attributes");
4167MODULE_LICENSE("GPL");
4168
4169module_init(fc_transport_init);
4170module_exit(fc_transport_exit);
4171