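/*
 * lpfc_hbadisc.c - Emulex LightPulse Fibre Channel (lpfc) driver:
 * HBA discovery, remote port (rport) lifecycle, link up/down handling,
 * worker-thread event processing, and SLI4 FCF table scan/selection.
 */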
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
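
/* AlpaArray for assignment of scsid for scan-down and bind_method */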
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
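
/*
 * lpfc_terminate_rport_io - Abort outstanding FCP I/O for an rport.
 * Called by the FC transport; aborts all FCP-ring IOCBs addressed to
 * the remote port's SCSI target ID.
 */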
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.ring[phba->sli.fcp_ring],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}
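
/*
 * lpfc_dev_loss_tmo_callbk - FC transport dev_loss_tmo callback.
 * Takes a reference on the node and queues an LPFC_EVT_DEV_LOSS event to
 * the worker thread, which performs the actual cleanup.
 */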
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* If the driver is unloading, just drop the node/rport references
	 * here; the unload path handles the rest of the cleanup.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
				 "6790 rport name %llx dev_loss_evt pending",
				 rport->port_name);
		return;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
	spin_unlock_irq(shost->host_lock);

	/* Hold a node reference for the duration of the queued work;
	 * it is released when the work item completes.
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);

	spin_lock_irq(&phba->hbalock);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irq(&phba->hbalock);

	return;
}
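
/*
 * lpfc_dev_loss_tmo_handler - Worker-thread side of dev_loss handling.
 * Breaks the node/rport linkage, aborts outstanding I/O to the target,
 * and removes non-fabric nodes that are not being recovered. Returns
 * whether the in-use FCF was still referenced (SLI4 only).
 */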
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct Scsi_Host *shost;
	uint8_t *name;
	int put_node;
	int warn_on = 0;
	int fcf_inuse = 0;

	rport = ndlp->rport;
	vport = ndlp->vport;
	shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irq(shost->host_lock);

	if (!rport)
		return fcf_inuse;

	name = (uint8_t *) &ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	rdata = rport->dd_data;

	/* If the driver is unloading, abort any I/O still queued to the
	 * target and drop the node/rport references here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					&phba->sli.ring[phba->sli.fcp_ring],
					ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		put_device(&rport->dev);

		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	put_node = rdata->pnode != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	put_device(&rport->dev);

	if (ndlp->nlp_type & NLP_FABRIC)
		return fcf_inuse;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(vport->load_flag & FC_UNLOADING) &&
	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}
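
/*
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post-devloss processing.
 * When the last remote node using the in-use FCF has timed out, either
 * defer to an in-progress FCF rediscovery or unregister the FCF and
 * rescan the FCF table.
 */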
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If the FCF was not in use when devloss fired, nothing to do */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister the in-use FCF and rescan the table */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}
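
/*
 * lpfc_alloc_fast_evt - Allocate a fast-path event (GFP_ATOMIC). Returns
 * NULL once LPFC_MAX_EVT_COUNT events are outstanding so an event storm
 * cannot exhaust memory.
 */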
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba)
{
	struct lpfc_fast_path_event *ret;

	/* If there are lots of fast events, don't exhaust memory on them */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}
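
/*
 * lpfc_free_fast_evt - Free a fast-path event and decrement the
 * outstanding fast event count.
 */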
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt)
{
	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}
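
/*
 * lpfc_send_fastpath_evt - Deliver a queued fast-path event to user space
 * as an FC transport vendor-unique event, then free it. Events with an
 * unrecognized category/subcategory are silently dropped.
 */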
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		evt_data_size,
		evt_data,
		LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}
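
/*
 * lpfc_work_list_done - Drain phba->work_list and dispatch each queued
 * worker event (ELS retry, dev-loss, online/offline transitions, HBA
 * reset, fast-path management events).
 */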
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* Drop the node reference held when the event
			 * was queued.
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* Drop the node reference held when the event
			 * was queued.
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}
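
/*
 * lpfc_work_done - Worker thread main body: handles host-attention
 * status, SLI4 async events, per-vport timeout work, slow-path (ELS)
 * ring events, and finally the worker event list.
 */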
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if ((status & HA_RXMASK) ||
	    (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
	    (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			if (phba->link_state >= LPFC_LINK_UP) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!list_empty(&pring->txq)))
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}
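
/*
 * lpfc_do_work - Worker kthread entry point. Sleeps until LPFC_DATA_READY
 * is set (or a stop is requested), then runs lpfc_work_done().
 */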
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					(test_and_clear_bit(LPFC_DATA_READY,
							    &phba->data_flags)
					 || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}
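
/*
 * lpfc_workq_post_event - Allocate (GFP_ATOMIC, so callable from
 * interrupt context) and queue a worker event, then wake the worker
 * thread. Returns 1 on success, 0 on allocation failure.
 */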
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All new events are allocated atomically since this may be
	 * called from interrupt context.
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}
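
/*
 * lpfc_cleanup_rpis - On link failure, unregister RPIs where required and
 * run each node through the discovery state machine (DEVICE_RM when
 * @remove is set, otherwise DEVICE_RECOVERY).
 */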
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave fabric nodes alone unless we are removing them */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;
		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}
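
/*
 * lpfc_port_link_failure - Per-vport link failure cleanup: flush receive
 * buffers, RSCN state, and outstanding ELS commands, then clean up RPIs.
 */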
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}
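
/*
 * lpfc_linkdown_port - Per-vport link-down processing: post the transport
 * FCH_EVT_LINKDOWN event, run link-failure cleanup, and stop delayed
 * discovery.
 */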
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}
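
/*
 * lpfc_linkdown - HBA-wide link-down handling: block SCSI devices, mark
 * the link down on all vports, unregister default RPIs, and reset
 * point-to-point state.
 */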
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI devices while the link is down */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
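
/*
 * lpfc_linkup_cleanup_nodes - After a loop link-up (FC_LBIT), unregister
 * stale RPIs and return fabric nodes to NPR state for rediscovery.
 */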
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}
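
/*
 * lpfc_linkup_port - Per-vport link-up processing: post the transport
 * FCH_EVT_LINKUP event and reset the discovery flags.
 */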
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up: top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);

}
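
/*
 * lpfc_linkup - HBA-wide link-up handling: unblock fabric IOCBs and run
 * per-vport link-up processing.
 */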
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	return 0;
}
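
/*
 * lpfc_mbx_cmpl_clear_la - Completion handler for the CLEAR_LA mailbox
 * command. Re-enables link-attention processing; on mailbox error the
 * HBA is marked failed and discovery is completed.
 */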
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}
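
/*
 * lpfc_mbx_cmpl_local_config_link - Completion handler for CONFIG_LINK.
 * Either waits for FAN on public loop, issues the initial FLOGI, or
 * starts point-to-point discovery; on error it forces the link down.
 */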
void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI)
		lpfc_initial_flogi(vport);
	else if (vport->fc_flag & FC_PT2PT)
		lpfc_disc_start(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}
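
/*
 * lpfc_sli4_clear_fcf_rr_bmask - Reset the roundrobin FCF index bitmask
 * and empty the FCF priority list.
 */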
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
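
/*
 * lpfc_mbx_cmpl_reg_fcfi - Completion handler for REG_FCFI. Records the
 * registered FCFI, marks the FCF table scan done, and kicks off discovery
 * (INIT_VFI) when FLOGI is not already outstanding.
 */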
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}
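
/*
 * lpfc_fab_name_match - Return 1 if @fab_name matches the fabric name in
 * @new_fcf_record, otherwise 0.
 */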
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}
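
/*
 * lpfc_sw_name_match - Return 1 if @sw_name matches the switch name in
 * @new_fcf_record, otherwise 0.
 */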
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}
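
/*
 * lpfc_mac_addr_match - Return 1 if @mac_addr matches the MAC address in
 * @new_fcf_record, otherwise 0.
 */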
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}
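
/*
 * __lpfc_update_fcf_record_pri - Refresh the driver's FCF priority entry
 * for @fcf_index from a newly read FCF record. Caller holds hbalock.
 */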
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	lockdep_assert_held(&phba->hbalock);

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;

}
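
/*
 * lpfc_copy_fcf_record - Copy the fabric name, MAC address, FCF index,
 * priority, and switch name from a mailbox FCF record into the driver's
 * FCF record.
 */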
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}
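
/*
 * __lpfc_update_fcf_record - Update a driver FCF record from a newly read
 * FCF record together with the derived addressing mode, VLAN id, and
 * flags. Caller holds hbalock.
 */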
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	lockdep_assert_held(&phba->hbalock);

	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
		new_fcf_record);
}
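
/*
 * lpfc_register_fcf - Register the current FCF with the firmware via
 * REG_FCFI, or start FLOGI immediately if it is already registered.
 */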
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI &&
		    phba->pport->fc_flag & FC_FABRIC) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}
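
/*
 * lpfc_match_fcf_conn_list - Validate an FCF record against the driver's
 * FCF connection list. Returns 1 on a match and reports the boot flag,
 * addressing mode, and VLAN id to use; returns 0 otherwise.
 */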
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, the driver
	 * connects to all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
			new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record reports a vlan id, use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/* If SPMA required but FCF doesn't support it, skip */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
					new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/* If FPMA required but FCF doesn't support it, skip */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This FCF record matches the filtering criteria of the
		 * connection record.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If the user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by the user is not
		 * supported by the FCF, use what the FCF reports.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				FCFCNCT_AM_SPMA) ?
				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use it
		 * only if the FCF supports it.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
			*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
			*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in the connect list, use the
		 * vlan id of the FCF record.
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}
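
/*
 * lpfc_check_pending_fcoe_event - If a new FCoE or link event arrived
 * while the previous one was being handled, restart or stop FCF
 * discovery (optionally unregistering the FCF) and return 1; otherwise
 * return 0.
 */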
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the link is up and no FCoE events occurred during the
	 * FCF discovery, there is no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear the FCF_TS_INPROG
		 * flag.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}
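
/*
 * lpfc_sli4_new_fcf_random_select - Return true with probability
 * 1/fcf_cnt; used to select uniformly among equal-priority eligible
 * FCFs.
 */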
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = 0xFFFF & prandom_u32();

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}
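
/*
 * lpfc_sli4_fcf_rec_mbox_parse - Extract the FCF record and next FCF
 * index from a completed non-embedded READ_FCF_RECORD mailbox command.
 * Returns NULL on mailbox error or an empty FCF table.
 */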
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status, if_type;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	lpfc_sli_pcimem_bcopy(shdr, shdr,
			      sizeof(union lpfc_sli4_cfg_shdr));
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
		    if_type == LPFC_SLI_INTF_IF_TYPE_2)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of FCF records */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}
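
/*
 * lpfc_sli4_log_fcf_record_info - Log the fields of a just-read FCF
 * record at LOG_FIP verbosity.
 */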
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index : x%x\n"
			"\tFCF_Avail : x%x\n"
			"\tFCF_Valid : x%x\n"
			"\tFCF_SOL : x%x\n"
			"\tFIP_Priority : x%x\n"
			"\tMAC_Provider : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}
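
/*
 * lpfc_sli4_fcf_record_match - Return true if a newly read FCF record
 * carries the same VLAN id, MAC address, switch name, fabric name, and
 * priority as the given driver FCF record.
 */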
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}
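
/*
 * lpfc_sli4_fcf_rr_next_proc - Continue roundrobin FCF failover to
 * @fcf_index, or stop FLOGI to the current FCF when no eligible FCF
 * remains. Returns 0 when failover proceeds, 1 when it is stopped.
 */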
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end of FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}
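
/*
 * lpfc_sli4_fcf_pri_list_del - Remove an FCF index from the FCF priority
 * list, decrementing the eligible count when it shared the current
 * record's priority.
 */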
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
		"3058 deleting idx x%x pri x%x flg x%x\n",
		fcf_index, new_fcf_pri->fcf_rec.priority,
		new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}
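
/*
 * lpfc_sli4_set_fcf_flogi_fail - Flag an FCF index as having failed
 * FLOGI so roundrobin selection can pass over it.
 */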
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;
	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}
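
/*
 * lpfc_sli4_fcf_pri_list_add - Insert an FCF index into the priority-
 * sorted FCF candidate list and, when it ties or beats the best known
 * priority, update the roundrobin bitmask and eligible FCF count.
 */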
static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
				      uint16_t fcf_index,
				      struct fcf_record *new_fcf_record)
{
	uint16_t current_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	struct lpfc_fcf_pri *new_fcf_pri;
	int ret;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
		"3059 adding idx x%x pri x%x flg x%x\n",
		fcf_index, new_fcf_record->fip_priority,
		new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
		list_del_init(&new_fcf_pri->list);
	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
	if (list_empty(&phba->fcf.fcf_pri_list)) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				LPFC_SLI4_FCF_TBL_INDX_MAX);
	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		ret = 0; /* Empty rr list */
		goto out;
	}
	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
	if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
		if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
			memset(phba->fcf.fcf_rr_bmask, 0,
				sizeof(*phba->fcf.fcf_rr_bmask));
			/* fcfs_at_this_priority_level = 1; */
			phba->fcf.eligible_fcf_cnt = 1;
		} else
			/* fcfs_at_this_priority_level++; */
			phba->fcf.eligible_fcf_cnt++;
		ret = lpfc_sli4_fcf_rr_index_set(phba,
				new_fcf_pri->fcf_rec.fcf_index);
		goto out;
	}

	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				&phba->fcf.fcf_pri_list, list) {
		if (new_fcf_pri->fcf_rec.priority <=
				fcf_pri->fcf_rec.priority) {
			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
				list_add(&new_fcf_pri->list,
						&phba->fcf.fcf_pri_list);
			else
				list_add(&new_fcf_pri->list,
					 &((struct lpfc_fcf_pri *)
					fcf_pri->list.prev)->list);
			ret = 0;
			goto out;
		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
			|| new_fcf_pri->fcf_rec.priority <
				next_fcf_pri->fcf_rec.priority) {
			list_add(&new_fcf_pri->list, &fcf_pri->list);
			ret = 0;
			goto out;
		}
		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
			continue;

	}
	ret = 1;
out:
	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
	spin_unlock_irq(&phba->hbalock);
	return ret;
}
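
/*
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - READ_FCF_RECORD completion for
 * the FCF table scan. Checks each record against the connection list and
 * tracks the best candidate (boot-enabled first, then lowest priority)
 * across the scan.
 */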
2149void
2150lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2151{
2152 struct fcf_record *new_fcf_record;
2153 uint32_t boot_flag, addr_mode;
2154 uint16_t fcf_index, next_fcf_index;
2155 struct lpfc_fcf_rec *fcf_rec = NULL;
2156 uint16_t vlan_id;
2157 bool select_new_fcf;
2158 int rc;
2159
2160
2161 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2162 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2163 return;
2164 }
2165
2166
2167 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2168 &next_fcf_index);
2169 if (!new_fcf_record) {
2170 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2171 "2765 Mailbox command READ_FCF_RECORD "
2172 "failed to retrieve a FCF record.\n");
2173
2174 spin_lock_irq(&phba->hbalock);
2175 phba->hba_flag &= ~FCF_TS_INPROG;
2176 spin_unlock_irq(&phba->hbalock);
2177 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2178 return;
2179 }
2180
2181
2182 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2183 &addr_mode, &vlan_id);
2184
2185
2186 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2187 next_fcf_index);
2188
2189
2190
2191
2192
2193
2194 if (!rc) {
2195 lpfc_sli4_fcf_pri_list_del(phba,
2196 bf_get(lpfc_fcf_record_fcf_index,
2197 new_fcf_record));
2198 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2199 "2781 FCF (x%x) failed connection "
2200 "list check: (x%x/x%x/%x)\n",
2201 bf_get(lpfc_fcf_record_fcf_index,
2202 new_fcf_record),
2203 bf_get(lpfc_fcf_record_fcf_avail,
2204 new_fcf_record),
2205 bf_get(lpfc_fcf_record_fcf_valid,
2206 new_fcf_record),
2207 bf_get(lpfc_fcf_record_fcf_sol,
2208 new_fcf_record));
2209 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2210 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2211 new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2212 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2213 phba->fcf.current_rec.fcf_indx) {
2214 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2215 "2862 FCF (x%x) matches property "
2216 "of in-use FCF (x%x)\n",
2217 bf_get(lpfc_fcf_record_fcf_index,
2218 new_fcf_record),
2219 phba->fcf.current_rec.fcf_indx);
2220 goto read_next_fcf;
2221 }

			/*
			 * In case the current in-use FCF record becomes
			 * invalid/unavailable during FCF discovery that was
			 * not triggered by fast FCF failover, treat it as
			 * fast FCF failover.
			 */
2228 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2229 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2230 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2231 "2835 Invalid in-use FCF "
2232 "(x%x), enter FCF failover "
2233 "table scan.\n",
2234 phba->fcf.current_rec.fcf_indx);
2235 spin_lock_irq(&phba->hbalock);
2236 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2237 spin_unlock_irq(&phba->hbalock);
2238 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2239 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2240 LPFC_FCOE_FCF_GET_FIRST);
2241 return;
2242 }
2243 }
2244 goto read_next_fcf;
2245 } else {
2246 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2247 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2248 new_fcf_record);
2249 if (rc)
2250 goto read_next_fcf;
2251 }

	/*
	 * If this is not the first FCF discovery of the HBA, use the last
	 * FCF record for the discovery. A rescan matches the in-use FCF
	 * record on fabric name, switch name, mac address, and vlan_id.
	 */
2259 spin_lock_irq(&phba->hbalock);
2260 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2261 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2262 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2263 new_fcf_record, vlan_id)) {
2264 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2265 phba->fcf.current_rec.fcf_indx) {
2266 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2267 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2268
2269 __lpfc_sli4_stop_fcf_redisc_wait_timer(
2270 phba);
2271 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2272
2273 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2274 spin_unlock_irq(&phba->hbalock);
2275 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2276 "2836 New FCF matches in-use "
2277 "FCF (x%x), port_state:x%x, "
2278 "fc_flag:x%x\n",
2279 phba->fcf.current_rec.fcf_indx,
2280 phba->pport->port_state,
2281 phba->pport->fc_flag);
2282 goto out;
2283 } else
2284 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2285 "2863 New FCF (x%x) matches "
2286 "property of in-use FCF (x%x)\n",
2287 bf_get(lpfc_fcf_record_fcf_index,
2288 new_fcf_record),
2289 phba->fcf.current_rec.fcf_indx);
2290 }

		/*
		 * Read the next FCF record, searching for a match with the
		 * in-use record, only outside the fast failover period.
		 * During fast failover, every record read is instead
		 * evaluated as the next failover candidate.
		 */
2298 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2299 spin_unlock_irq(&phba->hbalock);
2300 goto read_next_fcf;
2301 }
2302 }

	/*
	 * Update the failover FCF record during the fast failover period;
	 * otherwise, update the current FCF record.
	 */
2307 if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2308 fcf_rec = &phba->fcf.failover_rec;
2309 else
2310 fcf_rec = &phba->fcf.current_rec;
2311
2312 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
		/*
		 * If the candidate FCF record does not have the boot flag
		 * set and the new record does, prefer the new record.
		 */
2318 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2319
2320 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2321 "2837 Update current FCF record "
2322 "(x%x) with new FCF record (x%x)\n",
2323 fcf_rec->fcf_indx,
2324 bf_get(lpfc_fcf_record_fcf_index,
2325 new_fcf_record));
2326 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2327 addr_mode, vlan_id, BOOT_ENABLE);
2328 spin_unlock_irq(&phba->hbalock);
2329 goto read_next_fcf;
2330 }

		/*
		 * If the candidate FCF record has the boot flag set and the
		 * new record does not, keep the candidate and read on.
		 */
2336 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2337 spin_unlock_irq(&phba->hbalock);
2338 goto read_next_fcf;
2339 }

		/*
		 * If the new record has a lower (better) FIP priority than
		 * the candidate, take the new record.
		 */
2344 if (new_fcf_record->fip_priority < fcf_rec->priority) {
2345
2346 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2347 "2838 Update current FCF record "
2348 "(x%x) with new FCF record (x%x)\n",
2349 fcf_rec->fcf_indx,
2350 bf_get(lpfc_fcf_record_fcf_index,
2351 new_fcf_record));
2352 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2353 addr_mode, vlan_id, 0);
2354
2355 phba->fcf.eligible_fcf_cnt = 1;
2356 } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2357
2358 phba->fcf.eligible_fcf_cnt++;
2359 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2360 phba->fcf.eligible_fcf_cnt);
2361 if (select_new_fcf) {
2362 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2363 "2839 Update current FCF record "
2364 "(x%x) with new FCF record (x%x)\n",
2365 fcf_rec->fcf_indx,
2366 bf_get(lpfc_fcf_record_fcf_index,
2367 new_fcf_record));
2368
2369 __lpfc_update_fcf_record(phba, fcf_rec,
2370 new_fcf_record,
2371 addr_mode, vlan_id, 0);
2372 }
2373 }
2374 spin_unlock_irq(&phba->hbalock);
2375 goto read_next_fcf;
2376 }

	/*
	 * This is the first suitable FCF record found: take it as the
	 * initial best-fit candidate.
	 */
2381 if (fcf_rec) {
2382 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2383 "2840 Update initial FCF candidate "
2384 "with FCF (x%x)\n",
2385 bf_get(lpfc_fcf_record_fcf_index,
2386 new_fcf_record));
2387 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2388 addr_mode, vlan_id, (boot_flag ?
2389 BOOT_ENABLE : 0));
2390 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2391
2392 phba->fcf.eligible_fcf_cnt = 1;
2393 }
2394 spin_unlock_irq(&phba->hbalock);
2395 goto read_next_fcf;
2396
2397read_next_fcf:
2398 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2399 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2400 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
			/*
			 * Fast FCF failover scan reached the end of the
			 * table without finding a valid failover candidate:
			 * cancel the scan (or, on devloss timeout, unregister
			 * the in-use FCF and rescan the table).
			 */
2409 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2410 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2411 "2782 No suitable FCF found: "
2412 "(x%x/x%x)\n",
2413 phba->fcoe_eventtag_at_fcf_scan,
2414 bf_get(lpfc_fcf_record_fcf_index,
2415 new_fcf_record));
2416 spin_lock_irq(&phba->hbalock);
2417 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2418 phba->hba_flag &= ~FCF_TS_INPROG;
2419 spin_unlock_irq(&phba->hbalock);
2420
2421 lpfc_printf_log(phba, KERN_INFO,
2422 LOG_FIP,
2423 "2864 On devloss tmo "
2424 "unreg in-use FCF and "
2425 "rescan FCF table\n");
2426 lpfc_unregister_fcf_rescan(phba);
2427 return;
2428 }

			/* Let the next FCF event trigger the rescan */
2432 phba->hba_flag &= ~FCF_TS_INPROG;
2433 spin_unlock_irq(&phba->hbalock);
2434 return;
2435 }

			/*
			 * A suitable failover candidate that differs from
			 * the in-use FCF was found: unregister the in-use
			 * FCF record, install the failover record as the
			 * current record, clear the fast failover flag, and
			 * register the new FCF.
			 */
			lpfc_unregister_fcf(phba);
2447
2448
2449 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2450 "2842 Replace in-use FCF (x%x) "
2451 "with failover FCF (x%x)\n",
2452 phba->fcf.current_rec.fcf_indx,
2453 phba->fcf.failover_rec.fcf_indx);
2454 memcpy(&phba->fcf.current_rec,
2455 &phba->fcf.failover_rec,
2456 sizeof(struct lpfc_fcf_rec));

			/* Mark the fast FCF failover completed */
2462 spin_lock_irq(&phba->hbalock);
2463 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2464 spin_unlock_irq(&phba->hbalock);
2465
2466 lpfc_register_fcf(phba);
2467 } else {
			/*
			 * End of table reached during a normal scan: if a
			 * rediscovery is already pending, do nothing here.
			 */
2472 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2473 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2474 return;
2475
2476 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2477 phba->fcf.fcf_flag & FCF_IN_USE) {
				/*
				 * The in-use FCF was not reported during the
				 * scan: it may no longer exist, so enter fast
				 * failover mode and rescan from the start.
				 */
2484 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2485 "2841 In-use FCF record (x%x) "
2486 "not reported, entering fast "
2487 "FCF failover mode scanning.\n",
2488 phba->fcf.current_rec.fcf_indx);
2489 spin_lock_irq(&phba->hbalock);
2490 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2491 spin_unlock_irq(&phba->hbalock);
2492 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2493 LPFC_FCOE_FCF_GET_FIRST);
2494 return;
2495 }
2496
2497 lpfc_register_fcf(phba);
2498 }
2499 } else
2500 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2501 return;
2502
2503out:
2504 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2505 lpfc_register_fcf(phba);
2506
2507 return;
2508}
2509
/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - roundrobin FCF failover completion
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the completed READ_FCF_RECORD mailbox object.
 *
 * Handles the FCF record read for FLOGI roundrobin FCF failover. An
 * ineligible record is cleared from the roundrobin bmask and the next
 * index is tried; a record matching the in-use FCF simply retries FLOGI
 * to it; otherwise the in-use FCF is replaced with the new record and
 * registration is started.
 */
2525void
2526lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2527{
2528 struct fcf_record *new_fcf_record;
2529 uint32_t boot_flag, addr_mode;
2530 uint16_t next_fcf_index, fcf_index;
2531 uint16_t current_fcf_index;
2532 uint16_t vlan_id;
2533 int rc;
2534
2535
2536 if (phba->link_state < LPFC_LINK_UP) {
2537 spin_lock_irq(&phba->hbalock);
2538 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2539 phba->hba_flag &= ~FCF_RR_INPROG;
2540 spin_unlock_irq(&phba->hbalock);
2541 goto out;
2542 }
2543
2544
2545 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2546 &next_fcf_index);
2547 if (!new_fcf_record) {
2548 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2549 "2766 Mailbox command READ_FCF_RECORD "
2550 "failed to retrieve a FCF record. "
2551 "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2552 phba->fcf.fcf_flag);
2553 lpfc_unregister_fcf_rescan(phba);
2554 goto out;
2555 }
2556
2557
2558 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2559 &addr_mode, &vlan_id);
2560
2561
2562 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2563 next_fcf_index);
2564
2565 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2566 if (!rc) {
2567 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) from "
				"roundrobin bmask\n", fcf_index);
2570
2571 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2572
2573 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2574 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2575 if (rc)
2576 goto out;
2577 goto error_out;
2578 }
2579
2580 if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2581 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2582 "2760 Perform FLOGI roundrobin FCF failover: "
2583 "FCF (x%x) back to FCF (x%x)\n",
2584 phba->fcf.current_rec.fcf_indx, fcf_index);

		/* Wait 500 ms before retrying FLOGI to the same FCF */
		msleep(500);
2587 lpfc_issue_init_vfi(phba->pport);
2588 goto out;
2589 }
2590
2591
2592 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2593 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2594 phba->fcf.failover_rec.fcf_indx, fcf_index);
2595 spin_lock_irq(&phba->hbalock);
2596 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2597 new_fcf_record, addr_mode, vlan_id,
2598 (boot_flag ? BOOT_ENABLE : 0));
2599 spin_unlock_irq(&phba->hbalock);
2600
2601 current_fcf_index = phba->fcf.current_rec.fcf_indx;
2602
2603
2604 lpfc_unregister_fcf(phba);
2605
2606
2607 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2608 sizeof(struct lpfc_fcf_rec));
2609
2610 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2611 "2783 Perform FLOGI roundrobin FCF failover: FCF "
2612 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2613
2614error_out:
2615 lpfc_register_fcf(phba);
2616out:
2617 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2618}
2619
/**
 * lpfc_mbx_cmpl_read_fcf_rec - read one FCF record mailbox completion
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the completed READ_FCF_RECORD mailbox object.
 *
 * Handles a single FCF record read during FCF discovery: if the record
 * passes the connection list check, it is added to the FCF priority list
 * used for roundrobin failover.
 */
2631void
2632lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2633{
2634 struct fcf_record *new_fcf_record;
2635 uint32_t boot_flag, addr_mode;
2636 uint16_t fcf_index, next_fcf_index;
2637 uint16_t vlan_id;
2638 int rc;
2639
2640
2641 if (phba->link_state < LPFC_LINK_UP)
2642 goto out;
2643
2644
2645 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2646 goto out;
2647
2648
2649 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2650 &next_fcf_index);
2651 if (!new_fcf_record) {
2652 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2653 "2767 Mailbox command READ_FCF_RECORD "
2654 "failed to retrieve a FCF record.\n");
2655 goto out;
2656 }
2657
2658
2659 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2660 &addr_mode, &vlan_id);
2661
2662
2663 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2664 next_fcf_index);
2665
2666 if (!rc)
2667 goto out;
2668
2669
2670 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2671
2672 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2673
2674out:
2675 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2676}
2677
2678
2679
2680
2681
2682
2683
2684
2685static void
2686lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2687{
2688 struct lpfc_vport *vport = mboxq->vport;
2689
	/*
	 * VFI not supported on interface type 0, just do the FLOGI.
	 * Also continue if the VFI is in use: just use the same one.
	 */
2694 if (mboxq->u.mb.mbxStatus &&
2695 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2696 LPFC_SLI_INTF_IF_TYPE_0) &&
2697 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2698 lpfc_printf_vlog(vport, KERN_ERR,
2699 LOG_MBOX,
2700 "2891 Init VFI mailbox failed 0x%x\n",
2701 mboxq->u.mb.mbxStatus);
2702 mempool_free(mboxq, phba->mbox_mem_pool);
2703 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2704 return;
2705 }
2706
2707 lpfc_initial_flogi(vport);
2708 mempool_free(mboxq, phba->mbox_mem_pool);
2709 return;
2710}
2711
/**
 * lpfc_issue_init_vfi - issue an INIT_VFI mailbox command for a vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Allocates and issues an INIT_VFI mailbox command with
 * lpfc_init_vfi_cmpl() as the completion handler.
 */
2719void
2720lpfc_issue_init_vfi(struct lpfc_vport *vport)
2721{
2722 LPFC_MBOXQ_t *mboxq;
2723 int rc;
2724 struct lpfc_hba *phba = vport->phba;
2725
2726 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2727 if (!mboxq) {
2728 lpfc_printf_vlog(vport, KERN_ERR,
2729 LOG_MBOX, "2892 Failed to allocate "
2730 "init_vfi mailbox\n");
2731 return;
2732 }
2733 lpfc_init_vfi(mboxq, vport);
2734 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2735 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2736 if (rc == MBX_NOT_FINISHED) {
2737 lpfc_printf_vlog(vport, KERN_ERR,
2738 LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
2739 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2740 }
2741}
2742
/**
 * lpfc_init_vpi_cmpl - INIT_VPI mailbox completion handler
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the completed mailbox object.
 *
 * On success, clears FC_VPORT_NEEDS_INIT_VPI and either registers the
 * new vport (physical port, or FDISC already done) or issues the initial
 * FDISC for the vport.
 */
2750void
2751lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2752{
2753 struct lpfc_vport *vport = mboxq->vport;
2754 struct lpfc_nodelist *ndlp;
2755 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2756
2757 if (mboxq->u.mb.mbxStatus) {
2758 lpfc_printf_vlog(vport, KERN_ERR,
2759 LOG_MBOX,
2760 "2609 Init VPI mailbox failed 0x%x\n",
2761 mboxq->u.mb.mbxStatus);
2762 mempool_free(mboxq, phba->mbox_mem_pool);
2763 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2764 return;
2765 }
2766 spin_lock_irq(shost->host_lock);
2767 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2768 spin_unlock_irq(shost->host_lock);
2769
2770
2771 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2772 ndlp = lpfc_findnode_did(vport, Fabric_DID);
2773 if (!ndlp)
2774 lpfc_printf_vlog(vport, KERN_ERR,
2775 LOG_DISCOVERY,
2776 "2731 Cannot find fabric "
2777 "controller node\n");
2778 else
2779 lpfc_register_new_vport(phba, vport, ndlp);
2780 mempool_free(mboxq, phba->mbox_mem_pool);
2781 return;
2782 }
2783
2784 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2785 lpfc_initial_fdisc(vport);
2786 else {
2787 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
2788 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2789 "2606 No NPIV Fabric support\n");
2790 }
2791 mempool_free(mboxq, phba->mbox_mem_pool);
2792 return;
2793}
2794
/**
 * lpfc_issue_init_vpi - issue an INIT_VPI mailbox command for a vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Allocates a vpi for the vport if needed, then issues an INIT_VPI
 * mailbox command with lpfc_init_vpi_cmpl() as the completion handler.
 */
2802void
2803lpfc_issue_init_vpi(struct lpfc_vport *vport)
2804{
2805 LPFC_MBOXQ_t *mboxq;
2806 int rc, vpi;
2807
2808 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
2809 vpi = lpfc_alloc_vpi(vport->phba);
2810 if (!vpi) {
2811 lpfc_printf_vlog(vport, KERN_ERR,
2812 LOG_MBOX,
2813 "3303 Failed to obtain vport vpi\n");
2814 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2815 return;
2816 }
2817 vport->vpi = vpi;
2818 }
2819
2820 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2821 if (!mboxq) {
2822 lpfc_printf_vlog(vport, KERN_ERR,
2823 LOG_MBOX, "2607 Failed to allocate "
2824 "init_vpi mailbox\n");
2825 return;
2826 }
2827 lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2828 mboxq->vport = vport;
2829 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2830 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2831 if (rc == MBX_NOT_FINISHED) {
2832 lpfc_printf_vlog(vport, KERN_ERR,
2833 LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
2834 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2835 }
2836}
2837
/**
 * lpfc_start_fdiscs - send FDISCs for all the vports on an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks the vport work array and, for each NPIV vport, either issues an
 * INIT_VPI, issues the initial FDISC, or marks the vport failed when the
 * topology or fabric does not support NPIV.
 */
2845void
2846lpfc_start_fdiscs(struct lpfc_hba *phba)
2847{
2848 struct lpfc_vport **vports;
2849 int i;
2850
2851 vports = lpfc_create_vport_work_array(phba);
2852 if (vports != NULL) {
2853 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2854 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
2855 continue;
2856
2857 if (vports[i]->vpi > phba->max_vpi) {
2858 lpfc_vport_set_state(vports[i],
2859 FC_VPORT_FAILED);
2860 continue;
2861 }
2862 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2863 lpfc_vport_set_state(vports[i],
2864 FC_VPORT_LINKDOWN);
2865 continue;
2866 }
2867 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
2868 lpfc_issue_init_vpi(vports[i]);
2869 continue;
2870 }
2871 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2872 lpfc_initial_fdisc(vports[i]);
2873 else {
2874 lpfc_vport_set_state(vports[i],
2875 FC_VPORT_NO_FABRIC_SUPP);
2876 lpfc_printf_vlog(vports[i], KERN_ERR,
2877 LOG_ELS,
2878 "0259 No NPIV "
2879 "Fabric support\n");
2880 }
2881 }
2882 }
2883 lpfc_destroy_vport_work_array(phba, vports);
2884}
2885
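/*
 * lpfc_mbx_cmpl_reg_vfi - REG_VFI mailbox completion handler. Marks the
 * VFI/VPI registered and, when the port is in LPFC_FABRIC_CFG_LINK state,
 * kicks off either loop discovery or the fabric FDISCs and NameServer
 * PLOGI, depending on the topology.
 */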
2886void
2887lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2888{
2889 struct lpfc_dmabuf *dmabuf = mboxq->context1;
2890 struct lpfc_vport *vport = mboxq->vport;
2891 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2892
	/*
	 * VFI not supported for interface type 0, so ignore any mailbox
	 * error (except VFI in use) and continue with the discovery.
	 */
2897 if (mboxq->u.mb.mbxStatus &&
2898 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2899 LPFC_SLI_INTF_IF_TYPE_0) &&
2900 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2901 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2902 "2018 REG_VFI mbxStatus error x%x "
2903 "HBA state x%x\n",
2904 mboxq->u.mb.mbxStatus, vport->port_state);
2905 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2906
2907 lpfc_disc_list_loopmap(vport);
2908
2909 lpfc_disc_start(vport);
2910 goto out_free_mem;
2911 }
2912 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2913 goto out_free_mem;
2914 }
2915
	/*
	 * If the VFI is already registered, there is nothing else to do
	 * unless this was a VFI update in PT2PT mode; then drop through
	 * and set the port state to ready.
	 */
2920 if (vport->fc_flag & FC_VFI_REGISTERED)
2921 if (!(phba->sli_rev == LPFC_SLI_REV4 &&
2922 vport->fc_flag & FC_PT2PT))
2923 goto out_free_mem;
2924
2925
2926 spin_lock_irq(shost->host_lock);
2927 vport->vpi_state |= LPFC_VPI_REGISTERED;
2928 vport->fc_flag |= FC_VFI_REGISTERED;
2929 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2930 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2931 spin_unlock_irq(shost->host_lock);
2932
2933
2934 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2935 (phba->link_flag & LS_LOOPBACK_MODE)) {
2936 phba->link_state = LPFC_HBA_READY;
2937 goto out_free_mem;
2938 }
2939
2940 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2941 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
2942 "alpacnt:%d LinkState:%x topology:%x\n",
2943 vport->port_state, vport->fc_flag, vport->fc_myDID,
2944 vport->phba->alpa_map[0],
2945 phba->link_state, phba->fc_topology);
2946
2947 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/*
		 * For private loop or point-to-point, just start
		 * discovery and we are done.
		 */
2952 if ((vport->fc_flag & FC_PT2PT) ||
2953 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
2954 !(vport->fc_flag & FC_PUBLIC_LOOP))) {
2955
2956
2957 lpfc_disc_list_loopmap(vport);
2958
2959 if (vport->fc_flag & FC_PT2PT)
2960 vport->port_state = LPFC_VPORT_READY;
2961 else
2962 lpfc_disc_start(vport);
2963 } else {
2964 lpfc_start_fdiscs(phba);
2965 lpfc_do_scr_ns_plogi(phba, vport);
2966 }
2967 }
2968
2969out_free_mem:
2970 mempool_free(mboxq, phba->mbox_mem_pool);
2971 if (dmabuf) {
2972 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2973 kfree(dmabuf);
2974 }
2975 return;
2976}
2977
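/*
 * lpfc_mbx_cmpl_read_sparam - READ_SPARAM mailbox completion handler.
 * Copies the service parameters into the vport and derives E_D_TOV and
 * R_A_TOV from them; on mailbox error the link is taken down.
 */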
2978static void
2979lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2980{
2981 MAILBOX_t *mb = &pmb->u.mb;
2982 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
2983 struct lpfc_vport *vport = pmb->vport;
2984 struct serv_parm *sp = &vport->fc_sparam;
2985 uint32_t ed_tov;
2986
2987
2988 if (mb->mbxStatus) {
2989
2990 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2991 "0319 READ_SPARAM mbxStatus error x%x "
				"hba state x%x\n",
2993 mb->mbxStatus, vport->port_state);
2994 lpfc_linkdown(phba);
2995 goto out;
2996 }
2997
2998 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
2999 sizeof (struct serv_parm));
3000
	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		ed_tov = (ed_tov + 999999) / 1000000;

	phba->fc_edtov = ed_tov;
	phba->fc_ratov = (2 * ed_tov) / 1000;	/* R_A_TOV is twice E_D_TOV */
	if (phba->fc_ratov < FF_DEF_RATOV) {
		/* R_A_TOV should be at least FF_DEF_RATOV */
		phba->fc_ratov = FF_DEF_RATOV;
	}
3011
3012 lpfc_update_vport_wwn(vport);
3013 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3014 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
3016 }
3017
3018 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3019 kfree(mp);
3020 mempool_free(pmb, phba->mbox_mem_pool);
3021 return;
3022
3023out:
3024 pmb->context1 = NULL;
3025 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3026 kfree(mp);
3027 lpfc_issue_clear_la(phba, vport);
3028 mempool_free(pmb, phba->mbox_mem_pool);
3029 return;
3030}
3031
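/*
 * lpfc_mbx_process_link_up - handle a link-up attention event. Records
 * the link speed and topology, sets up loop vs. fabric addressing, reads
 * the service parameters, and then either issues CONFIG_LINK (FC) or
 * starts the FCF table scan (FCoE).
 */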
3032static void
3033lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3034{
3035 struct lpfc_vport *vport = phba->pport;
3036 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
3037 struct Scsi_Host *shost;
3038 int i;
3039 struct lpfc_dmabuf *mp;
3040 int rc;
3041 struct fcf_record *fcf_record;
3042 uint32_t fc_flags = 0;
3043
3044 spin_lock_irq(&phba->hbalock);
3045 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
3046
3047 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3048 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
3049 case LPFC_LINK_SPEED_1GHZ:
3050 case LPFC_LINK_SPEED_2GHZ:
3051 case LPFC_LINK_SPEED_4GHZ:
3052 case LPFC_LINK_SPEED_8GHZ:
3053 case LPFC_LINK_SPEED_10GHZ:
3054 case LPFC_LINK_SPEED_16GHZ:
3055 case LPFC_LINK_SPEED_32GHZ:
3056 break;
3057 default:
3058 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
3059 break;
3060 }
3061 }
3062
3063 if (phba->fc_topology &&
3064 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3065 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3314 Topology changed was 0x%x is 0x%x\n",
3067 phba->fc_topology,
3068 bf_get(lpfc_mbx_read_top_topology, la));
3069 phba->fc_topology_changed = 1;
3070 }
3071
3072 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3073 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
3074
3075 shost = lpfc_shost_from_vport(vport);
3076 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3077 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

		/* If NPIV is enabled and the adapter supports it, log that
		 * NPIV is not supported in loop topology.
		 */
3082 if (phba->cfg_enable_npiv && phba->max_vpi)
3083 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3084 "1309 Link Up Event npiv not supported in loop "
3085 "topology\n");
3086
3087 if (bf_get(lpfc_mbx_read_top_il, la))
3088 fc_flags |= FC_LBIT;
3089
3090 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3091 i = la->lilpBde64.tus.f.bdeSize;
3092
3093 if (i == 0) {
3094 phba->alpa_map[0] = 0;
3095 } else {
3096 if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3097 int numalpa, j, k;
3098 union {
3099 uint8_t pamap[16];
3100 struct {
3101 uint32_t wd1;
3102 uint32_t wd2;
3103 uint32_t wd3;
3104 uint32_t wd4;
3105 } pa;
3106 } un;
3107 numalpa = phba->alpa_map[0];
3108 j = 0;
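				/* Dump the granted loop map, 16 ALPAs
				 * per log message.
				 */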
3109 while (j < numalpa) {
3110 memset(un.pamap, 0, 16);
3111 for (k = 1; j < numalpa; k++) {
3112 un.pamap[k - 1] =
3113 phba->alpa_map[j + 1];
3114 j++;
3115 if (k == 16)
3116 break;
3117 }
3118
3119 lpfc_printf_log(phba,
3120 KERN_WARNING,
3121 LOG_LINK_EVENT,
3122 "1304 Link Up Event "
3123 "ALPA map Data: x%x "
3124 "x%x x%x x%x\n",
3125 un.pa.wd1, un.pa.wd2,
3126 un.pa.wd3, un.pa.wd4);
3127 }
3128 }
3129 }
3130 } else {
3131 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3132 if (phba->max_vpi && phba->cfg_enable_npiv &&
3133 (phba->sli_rev >= LPFC_SLI_REV3))
3134 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3135 }
3136 vport->fc_myDID = phba->fc_pref_DID;
3137 fc_flags |= FC_LBIT;
3138 }
3139 spin_unlock_irq(&phba->hbalock);
3140
3141 if (fc_flags) {
3142 spin_lock_irq(shost->host_lock);
3143 vport->fc_flag |= fc_flags;
3144 spin_unlock_irq(shost->host_lock);
3145 }
3146
3147 lpfc_linkup(phba);
3148 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3149 if (!sparam_mbox)
3150 goto out;
3151
3152 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
3153 if (rc) {
3154 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3155 goto out;
3156 }
3157 sparam_mbox->vport = vport;
3158 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
3159 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
3160 if (rc == MBX_NOT_FINISHED) {
3161 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
3162 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3163 kfree(mp);
3164 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3165 goto out;
3166 }
3167
3168 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3169 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3170 if (!cfglink_mbox)
3171 goto out;
3172 vport->port_state = LPFC_LOCAL_CFG_LINK;
3173 lpfc_config_link(phba, cfglink_mbox);
3174 cfglink_mbox->vport = vport;
3175 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
3176 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
3177 if (rc == MBX_NOT_FINISHED) {
3178 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
3179 goto out;
3180 }
3181 } else {
3182 vport->port_state = LPFC_VPORT_UNKNOWN;

		/*
		 * If FIP is not supported, add the driver's default FCF
		 * record at FCF index 0 before starting discovery.
		 */
3188 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
3189 fcf_record = kzalloc(sizeof(struct fcf_record),
3190 GFP_KERNEL);
3191 if (unlikely(!fcf_record)) {
3192 lpfc_printf_log(phba, KERN_ERR,
3193 LOG_MBOX | LOG_SLI,
3194 "2554 Could not allocate memory for "
3195 "fcf record\n");
3196 rc = -ENODEV;
3197 goto out;
3198 }
3199
3200 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
3201 LPFC_FCOE_FCF_DEF_INDEX);
3202 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
3203 if (unlikely(rc)) {
3204 lpfc_printf_log(phba, KERN_ERR,
3205 LOG_MBOX | LOG_SLI,
3206 "2013 Could not manually add FCF "
3207 "record 0, status %d\n", rc);
3208 rc = -ENODEV;
3209 kfree(fcf_record);
3210 goto out;
3211 }
3212 kfree(fcf_record);
3213 }
3214
		/*
		 * The driver is expected to do FIP/FCF: ask the firmware
		 * for the FCF table.
		 */
3218 spin_lock_irq(&phba->hbalock);
3219 if (phba->hba_flag & FCF_TS_INPROG) {
3220 spin_unlock_irq(&phba->hbalock);
3221 return;
3222 }
3223
3224 phba->fcf.fcf_flag |= FCF_INIT_DISC;
3225 spin_unlock_irq(&phba->hbalock);
3226 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3227 "2778 Start FCF table scan at linkup\n");
3228 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3229 LPFC_FCOE_FCF_GET_FIRST);
3230 if (rc) {
3231 spin_lock_irq(&phba->hbalock);
3232 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3233 spin_unlock_irq(&phba->hbalock);
3234 goto out;
3235 }
3236
3237 lpfc_sli4_clear_fcf_rr_bmask(phba);
3238 }
3239
3240 return;
3241out:
3242 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3243 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3244 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
3245 vport->port_state, sparam_mbox, cfglink_mbox);
3246 lpfc_issue_clear_la(phba, vport);
3247 return;
3248}
3249
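/*
 * lpfc_enable_la - re-enable link attention processing; for SLI-3 and
 * earlier, also turn the HC_LAINT_ENA bit back on in the Host Control
 * register.
 */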
3250static void
3251lpfc_enable_la(struct lpfc_hba *phba)
3252{
3253 uint32_t control;
3254 struct lpfc_sli *psli = &phba->sli;
3255 spin_lock_irq(&phba->hbalock);
3256 psli->sli_flag |= LPFC_PROCESS_LA;
3257 if (phba->sli_rev <= LPFC_SLI_REV3) {
3258 control = readl(phba->HCregaddr);
3259 control |= HC_LAINT_ENA;
3260 writel(control, phba->HCregaddr);
3261 readl(phba->HCregaddr);
3262 }
3263 spin_unlock_irq(&phba->hbalock);
3264}
3265
3266static void
3267lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
3268{
3269 lpfc_linkdown(phba);
3270 lpfc_enable_la(phba);
3271 lpfc_unregister_unused_fcf(phba);
3272
3273}
3274
3275
/*
 * This routine handles processing a READ_TOPOLOGY mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
3282void
3283lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3284{
3285 struct lpfc_vport *vport = pmb->vport;
3286 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3287 struct lpfc_mbx_read_top *la;
3288 MAILBOX_t *mb = &pmb->u.mb;
3289 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3290
3291
3292 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
3293
3294 if (mb->mbxStatus) {
3295 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3296 "1307 READ_LA mbox error x%x state x%x\n",
3297 mb->mbxStatus, vport->port_state);
3298 lpfc_mbx_issue_link_down(phba);
3299 phba->link_state = LPFC_HBA_ERROR;
3300 goto lpfc_mbx_cmpl_read_topology_free_mbuf;
3301 }
3302
3303 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3304
3305 memcpy(&phba->alpa_map[0], mp->virt, 128);
3306
3307 spin_lock_irq(shost->host_lock);
3308 if (bf_get(lpfc_mbx_read_top_pb, la))
3309 vport->fc_flag |= FC_BYPASSED_MODE;
3310 else
3311 vport->fc_flag &= ~FC_BYPASSED_MODE;
3312 spin_unlock_irq(shost->host_lock);
3313
3314 if (phba->fc_eventTag <= la->eventTag) {
3315 phba->fc_stat.LinkMultiEvent++;
3316 if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
3317 if (phba->fc_eventTag != 0)
3318 lpfc_linkdown(phba);
3319 }
3320
3321 phba->fc_eventTag = la->eventTag;
3322 if (phba->sli_rev < LPFC_SLI_REV4) {
3323 spin_lock_irq(&phba->hbalock);
3324 if (bf_get(lpfc_mbx_read_top_mm, la))
3325 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3326 else
3327 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3328 spin_unlock_irq(&phba->hbalock);
3329 }
3330
3331 phba->link_events++;
3332 if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
3333 !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
3334 phba->fc_stat.LinkUp++;
3335 if (phba->link_flag & LS_LOOPBACK_MODE) {
3336 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3337 "1306 Link Up Event in loop back mode "
3338 "x%x received Data: x%x x%x x%x x%x\n",
3339 la->eventTag, phba->fc_eventTag,
3340 bf_get(lpfc_mbx_read_top_alpa_granted,
3341 la),
3342 bf_get(lpfc_mbx_read_top_link_spd, la),
3343 phba->alpa_map[0]);
3344 } else {
3345 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3346 "1303 Link Up Event x%x received "
3347 "Data: x%x x%x x%x x%x x%x x%x %d\n",
3348 la->eventTag, phba->fc_eventTag,
3349 bf_get(lpfc_mbx_read_top_alpa_granted,
3350 la),
3351 bf_get(lpfc_mbx_read_top_link_spd, la),
3352 phba->alpa_map[0],
3353 bf_get(lpfc_mbx_read_top_mm, la),
3354 bf_get(lpfc_mbx_read_top_fa, la),
3355 phba->wait_4_mlo_maint_flg);
3356 }
3357 lpfc_mbx_process_link_up(phba, la);
3358 } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
3359 LPFC_ATT_LINK_DOWN) {
3360 phba->fc_stat.LinkDown++;
3361 if (phba->link_flag & LS_LOOPBACK_MODE)
3362 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3363 "1308 Link Down Event in loop back mode "
3364 "x%x received "
3365 "Data: x%x x%x x%x\n",
3366 la->eventTag, phba->fc_eventTag,
3367 phba->pport->port_state, vport->fc_flag);
3368 else
3369 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3370 "1305 Link Down Event x%x received "
3371 "Data: x%x x%x x%x x%x x%x\n",
3372 la->eventTag, phba->fc_eventTag,
3373 phba->pport->port_state, vport->fc_flag,
3374 bf_get(lpfc_mbx_read_top_mm, la),
3375 bf_get(lpfc_mbx_read_top_fa, la));
3376 lpfc_mbx_issue_link_down(phba);
3377 }
3378 if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
3379 ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
3380 if (phba->link_state != LPFC_LINK_DOWN) {
3381 phba->fc_stat.LinkDown++;
3382 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3383 "1312 Link Down Event x%x received "
3384 "Data: x%x x%x x%x\n",
3385 la->eventTag, phba->fc_eventTag,
3386 phba->pport->port_state, vport->fc_flag);
3387 lpfc_mbx_issue_link_down(phba);
3388 } else
3389 lpfc_enable_la(phba);
3390
3391 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3392 "1310 Menlo Maint Mode Link up Event x%x rcvd "
3393 "Data: x%x x%x x%x\n",
3394 la->eventTag, phba->fc_eventTag,
3395 phba->pport->port_state, vport->fc_flag);
3396
		/*
		 * The command that triggered Menlo maintenance mode is
		 * waiting for this signal.
		 */
3401 if (phba->wait_4_mlo_maint_flg) {
3402 phba->wait_4_mlo_maint_flg = 0;
3403 wake_up_interruptible(&phba->wait_4_mlo_m_q);
3404 }
3405 }
3406
3407 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3408 bf_get(lpfc_mbx_read_top_fa, la)) {
3409 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
3410 lpfc_issue_clear_la(phba, vport);
3411 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3412 "1311 fa %d\n",
3413 bf_get(lpfc_mbx_read_top_fa, la));
3414 }
3415
3416lpfc_mbx_cmpl_read_topology_free_mbuf:
3417 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3418 kfree(mp);
3419 mempool_free(pmb, phba->mbox_mem_pool);
3420 return;
3421}
3422
/*
 * This routine handles processing a REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
3429void
3430lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3431{
3432 struct lpfc_vport *vport = pmb->vport;
3433 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3434 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3435 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3436
3437 pmb->context1 = NULL;
3438 pmb->context2 = NULL;
3439
3440 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3441 "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n",
3442 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3443 atomic_read(&ndlp->kref.refcount),
3444 ndlp->nlp_usg_map, ndlp);
3445 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3446 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3447
3448 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3449 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
		/*
		 * An RSCN arrived after this REG_LOGIN was issued; the node
		 * may have cycled back to the REG_LOGIN state, so this
		 * completion must not be acted on because another REG_LOGIN
		 * is in progress.
		 */
3458 spin_lock_irq(shost->host_lock);
3459 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3460 spin_unlock_irq(shost->host_lock);
3461 }
3462
3463
3464 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3465
3466 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3467 kfree(mp);
3468 mempool_free(pmb, phba->mbox_mem_pool);
3469
	/* Drop the node reference count held for this callback function */
3472 lpfc_nlp_put(ndlp);
3473
3474 return;
3475}
3476
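/*
 * lpfc_mbx_cmpl_unreg_vpi - UNREG_VPI mailbox completion handler. Clears
 * the VPI-registered state and flags the vport as needing REG_VPI; a
 * 0x9700 status triggers an HBA reset unless the driver is unloading.
 */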
3477static void
3478lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3479{
3480 MAILBOX_t *mb = &pmb->u.mb;
3481 struct lpfc_vport *vport = pmb->vport;
3482 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3483
3484 switch (mb->mbxStatus) {
3485 case 0x0011:
3486 case 0x0020:
3487 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3488 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3489 mb->mbxStatus);
3490 break;
3491
3492 case 0x9700:
3493 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3494 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3495 vport->vpi, mb->mbxStatus);
3496 if (!(phba->pport->load_flag & FC_UNLOADING))
3497 lpfc_workq_post_event(phba, NULL, NULL,
3498 LPFC_EVT_RESET_HBA);
3499 }
3500 spin_lock_irq(shost->host_lock);
3501 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
3502 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3503 spin_unlock_irq(shost->host_lock);
3504 vport->unreg_vpi_cmpl = VPORT_OK;
3505 mempool_free(pmb, phba->mbox_mem_pool);
3506 lpfc_cleanup_vports_rrqs(vport, NULL);
3507
	/*
	 * This shost reference might have been taken at the beginning of
	 * lpfc_vport_delete().
	 */
3511 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
3512 scsi_host_put(shost);
3513}
3514
3515int
3516lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3517{
3518 struct lpfc_hba *phba = vport->phba;
3519 LPFC_MBOXQ_t *mbox;
3520 int rc;
3521
3522 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3523 if (!mbox)
3524 return 1;
3525
3526 lpfc_unreg_vpi(phba, vport->vpi, mbox);
3527 mbox->vport = vport;
3528 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
3529 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3530 if (rc == MBX_NOT_FINISHED) {
3531 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
3532 "1800 Could not issue unreg_vpi\n");
3533 mempool_free(mbox, phba->mbox_mem_pool);
3534 vport->unreg_vpi_cmpl = VPORT_ERROR;
3535 return rc;
3536 }
3537 return 0;
3538}
3539
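/*
 * lpfc_mbx_cmpl_reg_vpi - REG_VPI mailbox completion handler. On success,
 * marks the VPI registered and restarts discovery of NPR nodes; on the
 * known failure statuses, the vport is marked failed.
 */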
3540static void
3541lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3542{
3543 struct lpfc_vport *vport = pmb->vport;
3544 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3545 MAILBOX_t *mb = &pmb->u.mb;
3546
3547 switch (mb->mbxStatus) {
3548 case 0x0011:
3549 case 0x9601:
3550 case 0x9602:
3551 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3552 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3553 mb->mbxStatus);
3554 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3555 spin_lock_irq(shost->host_lock);
3556 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3557 spin_unlock_irq(shost->host_lock);
3558 vport->fc_myDID = 0;
3559 goto out;
3560 }
3561
3562 spin_lock_irq(shost->host_lock);
3563 vport->vpi_state |= LPFC_VPI_REGISTERED;
3564 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3565 spin_unlock_irq(shost->host_lock);
3566 vport->num_disc_nodes = 0;
3567
3568 if (vport->fc_npr_cnt)
3569 lpfc_els_disc_plogi(vport);
3570
3571 if (!vport->num_disc_nodes) {
3572 spin_lock_irq(shost->host_lock);
3573 vport->fc_flag &= ~FC_NDISC_ACTIVE;
3574 spin_unlock_irq(shost->host_lock);
3575 lpfc_can_disctmo(vport);
3576 }
3577 vport->port_state = LPFC_VPORT_READY;
3578
3579out:
3580 mempool_free(pmb, phba->mbox_mem_pool);
3581 return;
3582}
3583
3584
/**
 * lpfc_create_static_vport - Read HBA config region to create static vports.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a DUMP mailbox command to get the static vport
 * information saved in the HBA and creates the vports accordingly.
 */
3592void
3593lpfc_create_static_vport(struct lpfc_hba *phba)
3594{
3595 LPFC_MBOXQ_t *pmb = NULL;
3596 MAILBOX_t *mb;
3597 struct static_vport_info *vport_info;
3598 int mbx_wait_rc = 0, i;
3599 struct fc_vport_identifiers vport_id;
3600 struct fc_vport *new_fc_vport;
3601 struct Scsi_Host *shost;
3602 struct lpfc_vport *vport;
3603 uint16_t offset = 0;
3604 uint8_t *vport_buff;
3605 struct lpfc_dmabuf *mp;
3606 uint32_t byte_count = 0;
3607
3608 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3609 if (!pmb) {
3610 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3611 "0542 lpfc_create_static_vport failed to"
3612 " allocate mailbox memory\n");
3613 return;
3614 }
3615 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
3616 mb = &pmb->u.mb;
3617
3618 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
3619 if (!vport_info) {
3620 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3621 "0543 lpfc_create_static_vport failed to"
3622 " allocate vport_info\n");
3623 mempool_free(pmb, phba->mbox_mem_pool);
3624 return;
3625 }
3626
3627 vport_buff = (uint8_t *) vport_info;
3628 do {
3629
3630 if (pmb->context1) {
3631 mp = (struct lpfc_dmabuf *)pmb->context1;
3632 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3633 kfree(mp);
3634 }
3635 if (lpfc_dump_static_vport(phba, pmb, offset))
3636 goto out;
3637
3638 pmb->vport = phba->pport;
3639 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
3640 LPFC_MBOX_TMO);
3641
3642 if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
3643 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3644 "0544 lpfc_create_static_vport failed to"
3645 " issue dump mailbox command ret 0x%x "
3646 "status 0x%x\n",
3647 mbx_wait_rc, mb->mbxStatus);
3648 goto out;
3649 }
3650
3651 if (phba->sli_rev == LPFC_SLI_REV4) {
3652 byte_count = pmb->u.mqe.un.mb_words[5];
3653 mp = (struct lpfc_dmabuf *)pmb->context1;
3654 if (byte_count > sizeof(struct static_vport_info) -
3655 offset)
3656 byte_count = sizeof(struct static_vport_info)
3657 - offset;
3658 memcpy(vport_buff + offset, mp->virt, byte_count);
3659 offset += byte_count;
3660 } else {
3661 if (mb->un.varDmp.word_cnt >
3662 sizeof(struct static_vport_info) - offset)
3663 mb->un.varDmp.word_cnt =
3664 sizeof(struct static_vport_info)
3665 - offset;
3666 byte_count = mb->un.varDmp.word_cnt;
3667 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
3668 vport_buff + offset,
3669 byte_count);
3670
3671 offset += byte_count;
3672 }
3673
3674 } while (byte_count &&
3675 offset < sizeof(struct static_vport_info));
3676
3677
3678 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
3679 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
3680 != VPORT_INFO_REV)) {
3681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3682 "0545 lpfc_create_static_vport bad"
3683 " information header 0x%x 0x%x\n",
3684 le32_to_cpu(vport_info->signature),
3685 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
3686
3687 goto out;
3688 }
3689
3690 shost = lpfc_shost_from_vport(phba->pport);
3691
3692 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
3693 memset(&vport_id, 0, sizeof(vport_id));
3694 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
3695 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
3696 if (!vport_id.port_name || !vport_id.node_name)
3697 continue;
3698
3699 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
3700 vport_id.vport_type = FC_PORTTYPE_NPIV;
3701 vport_id.disable = false;
3702 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
3703
3704 if (!new_fc_vport) {
3705 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3706 "0546 lpfc_create_static_vport failed to"
3707 " create vport\n");
3708 continue;
3709 }
3710
3711 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
3712 vport->vport_flag |= STATIC_VPORT;
3713 }
3714
3715out:
3716 kfree(vport_info);
3717 if (mbx_wait_rc != MBX_TIMEOUT) {
3718 if (pmb->context1) {
3719 mp = (struct lpfc_dmabuf *)pmb->context1;
3720 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3721 kfree(mp);
3722 }
3723 mempool_free(pmb, phba->mbox_mem_pool);
3724 }
3725
3726 return;
3727}
3728
/*
 * This routine handles processing a Fabric REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
3735void
3736lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3737{
3738 struct lpfc_vport *vport = pmb->vport;
3739 MAILBOX_t *mb = &pmb->u.mb;
3740 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3741 struct lpfc_nodelist *ndlp;
3742 struct Scsi_Host *shost;
3743
3744 ndlp = (struct lpfc_nodelist *) pmb->context2;
3745 pmb->context1 = NULL;
3746 pmb->context2 = NULL;
3747
3748 if (mb->mbxStatus) {
3749 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3750 "0258 Register Fabric login error: 0x%x\n",
3751 mb->mbxStatus);
3752 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3753 kfree(mp);
3754 mempool_free(pmb, phba->mbox_mem_pool);
3755
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* FLOGI failed, use loop map to make discovery list */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);

			/* Drop the node reference count held for this
			 * callback function.
			 */
			lpfc_nlp_put(ndlp);
			return;
		}

		lpfc_vport_set_state(vport, FC_VPORT_FAILED);

		/* Drop the node reference count held for this
		 * callback function.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}
3776
3777 if (phba->sli_rev < LPFC_SLI_REV4)
3778 ndlp->nlp_rpi = mb->un.varWords[0];
3779 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3780 ndlp->nlp_type |= NLP_FABRIC;
3781 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3782
3783 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
		/* When the physical port has received a LOGO, do not start
		 * vport discovery.
		 */
3786 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
3787 lpfc_start_fdiscs(phba);
3788 else {
3789 shost = lpfc_shost_from_vport(vport);
3790 spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
3792 spin_unlock_irq(shost->host_lock);
3793 }
3794 lpfc_do_scr_ns_plogi(phba, vport);
3795 }
3796
3797 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3798 kfree(mp);
3799 mempool_free(pmb, phba->mbox_mem_pool);
3800
	/* Drop the node reference count held for this
	 * callback function.
	 */
3804 lpfc_nlp_put(ndlp);
3805 return;
3806}
3807
/*
 * This routine handles processing a NameServer REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
3814void
3815lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3816{
3817 MAILBOX_t *mb = &pmb->u.mb;
3818 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3819 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3820 struct lpfc_vport *vport = pmb->vport;
3821
3822 pmb->context1 = NULL;
3823 pmb->context2 = NULL;
3824
3825 if (mb->mbxStatus) {
3826out:
3827 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3828 "0260 Register NameServer error: 0x%x\n",
3829 mb->mbxStatus);
		/* Drop the node reference count held for this
		 * callback function.
		 */
3833 lpfc_nlp_put(ndlp);
3834 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3835 kfree(mp);
3836 mempool_free(pmb, phba->mbox_mem_pool);

		/* If no other thread is using the ndlp, free it */
3839 lpfc_nlp_not_used(ndlp);
3840
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/*
			 * RegLogin failed, use loop map to make discovery
			 * list.
			 */
			lpfc_disc_list_loopmap(vport);

			/* Start discovery */
			lpfc_disc_start(vport);
			return;
		}
3852 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3853 return;
3854 }
3855
3856 if (phba->sli_rev < LPFC_SLI_REV4)
3857 ndlp->nlp_rpi = mb->un.varWords[0];
3858 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3859 ndlp->nlp_type |= NLP_FABRIC;
3860 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3861 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3862 "0003 rpi:%x DID:%x flg:%x %d map%x %p\n",
3863 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3864 atomic_read(&ndlp->kref.refcount),
3865 ndlp->nlp_usg_map, ndlp);
3866
3867 if (vport->port_state < LPFC_VPORT_READY) {
		/* Good status, issue CT Request to NameServer */
3869 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
3870 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
3871 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3872 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
3873 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0);

		/* Issue SCR just before NameServer GID_FT Query */
3876 lpfc_issue_els_scr(vport, SCR_DID, 0);
3877 }
3878
3879 vport->fc_ns_retry = 0;
3880
3881 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
3882
3883 goto out;
3884 }
3885

	/* Drop the node reference count held for this
	 * callback function.
	 */
3889 lpfc_nlp_put(ndlp);
3890 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3891 kfree(mp);
3892 mempool_free(pmb, phba->mbox_mem_pool);
3893
3894 return;
3895}
3896
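/*
 * lpfc_register_remote_port - register an ndlp with the FC transport as
 * a remote port. Any existing rport binding is torn down first; the new
 * rport inherits the node's WWNs, DID, and roles, and a successful SCSI
 * target binding records the target id in nlp_sid.
 */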
3897static void
3898lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3899{
3900 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3901 struct fc_rport *rport;
3902 struct lpfc_rport_data *rdata;
3903 struct fc_rport_identifiers rport_ids;
3904 struct lpfc_hba *phba = vport->phba;
3905
3906
3907 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
3908 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
3909 rport_ids.port_id = ndlp->nlp_DID;
3910 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
3911
	/*
	 * If an rport is already bound to this node, break the binding
	 * and drop its references before registering anew.
	 */
	rport = ndlp->rport;
	if (rport) {
		rdata = rport->dd_data;
		/* break the link before dropping the ref */
		ndlp->rport = NULL;
		if (rdata) {
			if (rdata->pnode == ndlp)
				lpfc_nlp_put(ndlp);
			rdata->pnode = NULL;
		}
		/* drop reference for earlier registration */
		put_device(&rport->dev);
	}
3930
3931 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
3932 "rport add: did:x%x flg:x%x type x%x",
3933 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);

	/* Don't add the remote port if unloading. */
3936 if (vport->load_flag & FC_UNLOADING)
3937 return;
3938
3939 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
3940 if (!rport || !get_device(&rport->dev)) {
3941 dev_printk(KERN_WARNING, &phba->pcidev->dev,
3942 "Warning: fc_remote_port_add failed\n");
3943 return;
3944 }
3945
3946
3947 rport->maxframe_size = ndlp->nlp_maxframe;
3948 rport->supported_classes = ndlp->nlp_class_sup;
3949 rdata = rport->dd_data;
3950 rdata->pnode = lpfc_nlp_get(ndlp);
3951
3952 if (ndlp->nlp_type & NLP_FCP_TARGET)
3953 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
3954 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
3955 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
3956
3957 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
3958 fc_remote_port_rolechg(rport, rport_ids.roles);
3959
3960 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3961 "3183 rport register x%06x, rport %p role x%x\n",
3962 ndlp->nlp_DID, rport, rport_ids.roles);
3963
3964 if ((rport->scsi_target_id != -1) &&
3965 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
3966 ndlp->nlp_sid = rport->scsi_target_id;
3967 }
3968 return;
3969}
3970
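/*
 * lpfc_unregister_remote_port - remove an ndlp's rport from the FC
 * transport via fc_remote_port_delete(), with discovery tracing.
 */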
3971static void
3972lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
3973{
3974 struct fc_rport *rport = ndlp->rport;
3975
3976 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
3977 "rport delete: did:x%x flg:x%x type x%x",
3978 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3979
3980 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3981 "3184 rport unregister x%06x, rport %p\n",
3982 ndlp->nlp_DID, rport);
3983
3984 fc_remote_port_delete(rport);
3985
3986 return;
3987}
3988
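/*
 * lpfc_nlp_counters - adjust the per-vport count of nodes in @state by
 * @count (+1/-1), under the host lock. The NPR count is clamped so a
 * decrement never takes it below zero.
 */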
3989static void
3990lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
3991{
3992 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3993
3994 spin_lock_irq(shost->host_lock);
3995 switch (state) {
3996 case NLP_STE_UNUSED_NODE:
3997 vport->fc_unused_cnt += count;
3998 break;
3999 case NLP_STE_PLOGI_ISSUE:
4000 vport->fc_plogi_cnt += count;
4001 break;
4002 case NLP_STE_ADISC_ISSUE:
4003 vport->fc_adisc_cnt += count;
4004 break;
4005 case NLP_STE_REG_LOGIN_ISSUE:
4006 vport->fc_reglogin_cnt += count;
4007 break;
4008 case NLP_STE_PRLI_ISSUE:
4009 vport->fc_prli_cnt += count;
4010 break;
4011 case NLP_STE_UNMAPPED_NODE:
4012 vport->fc_unmap_cnt += count;
4013 break;
4014 case NLP_STE_MAPPED_NODE:
4015 vport->fc_map_cnt += count;
4016 break;
4017 case NLP_STE_NPR_NODE:
4018 if (vport->fc_npr_cnt == 0 && count == -1)
4019 vport->fc_npr_cnt = 0;
4020 else
4021 vport->fc_npr_cnt += count;
4022 break;
4023 }
4024 spin_unlock_irq(shost->host_lock);
4025}
4026
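/*
 * lpfc_nlp_state_cleanup - perform the rport and flag housekeeping that
 * goes with a node state transition: unregister the rport when leaving a
 * mapped/unmapped state, (re)register it when entering one, and move
 * targets without a usable SCSI id back to the unmapped state.
 */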
4027static void
4028lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4029 int old_state, int new_state)
4030{
4031 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4032
4033 if (new_state == NLP_STE_UNMAPPED_NODE) {
4034 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4035 ndlp->nlp_type |= NLP_FC_NODE;
4036 }
4037 if (new_state == NLP_STE_MAPPED_NODE)
4038 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4039 if (new_state == NLP_STE_NPR_NODE)
4040 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4041
4042
4043 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
4044 old_state == NLP_STE_UNMAPPED_NODE)) {
4045 vport->phba->nport_event_cnt++;
4046 lpfc_unregister_remote_port(ndlp);
4047 }
4048
4049 if (new_state == NLP_STE_MAPPED_NODE ||
4050 new_state == NLP_STE_UNMAPPED_NODE) {
4051 vport->phba->nport_event_cnt++;
		/*
		 * Tell the fc transport about the port, if we haven't
		 * already. If we have, and it's a scsi entity, be
		 * sure to unblock any attached scsi devices
		 */
4057 lpfc_register_remote_port(vport, ndlp);
4058 }
4059 if ((new_state == NLP_STE_MAPPED_NODE) &&
4060 (vport->stat_data_enabled)) {
		/*
		 * A new target is discovered: if there is no buffer for
		 * statistical data collection, allocate one.
		 */
4065 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
4066 sizeof(struct lpfc_scsicmd_bkt),
4067 GFP_KERNEL);
4068
4069 if (!ndlp->lat_data)
4070 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
4071 "0286 lpfc_nlp_state_cleanup failed to "
4072 "allocate statistical data buffer DID "
4073 "0x%x\n", ndlp->nlp_DID);
4074 }
4075
	/*
	 * If we added to the mapped list, but the remote port
	 * registration failed or assigned a target id outside
	 * our presentable range, move the node to the
	 * unmapped list.
	 */
4081 if (new_state == NLP_STE_MAPPED_NODE &&
4082 (!ndlp->rport ||
4083 ndlp->rport->scsi_target_id == -1 ||
4084 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4085 spin_lock_irq(shost->host_lock);
4086 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
4087 spin_unlock_irq(shost->host_lock);
4088 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4089 }
4090}
4091
4092static char *
4093lpfc_nlp_state_name(char *buffer, size_t size, int state)
4094{
4095 static char *states[] = {
4096 [NLP_STE_UNUSED_NODE] = "UNUSED",
4097 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
4098 [NLP_STE_ADISC_ISSUE] = "ADISC",
4099 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
4100 [NLP_STE_PRLI_ISSUE] = "PRLI",
4101 [NLP_STE_LOGO_ISSUE] = "LOGO",
4102 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
4103 [NLP_STE_MAPPED_NODE] = "MAPPED",
4104 [NLP_STE_NPR_NODE] = "NPR",
4105 };
4106
4107 if (state < NLP_STE_MAX_STATE && states[state])
4108 strlcpy(buffer, states[state], size);
4109 else
4110 snprintf(buffer, size, "unknown (%d)", state);
4111 return buffer;
4112}
4113
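/*
 * lpfc_nlp_set_state - move an ndlp to a new discovery state, keeping the
 * per-state counters, node list membership, and rport registration in
 * sync via lpfc_nlp_counters() and lpfc_nlp_state_cleanup().
 */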
4114void
4115lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4116 int state)
4117{
4118 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4119 int old_state = ndlp->nlp_state;
4120 char name1[16], name2[16];
4121
4122 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4123 "0904 NPort state transition x%06x, %s -> %s\n",
4124 ndlp->nlp_DID,
4125 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4126 lpfc_nlp_state_name(name2, sizeof(name2), state));
4127
4128 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4129 "node statechg did:x%x old:%d ste:%d",
4130 ndlp->nlp_DID, old_state, state);
4131
4132 if (old_state == NLP_STE_NPR_NODE &&
4133 state != NLP_STE_NPR_NODE)
4134 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4135 if (old_state == NLP_STE_UNMAPPED_NODE) {
4136 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4137 ndlp->nlp_type &= ~NLP_FC_NODE;
4138 }
4139
4140 if (list_empty(&ndlp->nlp_listp)) {
4141 spin_lock_irq(shost->host_lock);
4142 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4143 spin_unlock_irq(shost->host_lock);
4144 } else if (old_state)
4145 lpfc_nlp_counters(vport, old_state, -1);
4146
4147 ndlp->nlp_state = state;
4148 lpfc_nlp_counters(vport, state, 1);
4149 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4150}
4151
4152void
4153lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4154{
4155 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4156
4157 if (list_empty(&ndlp->nlp_listp)) {
4158 spin_lock_irq(shost->host_lock);
4159 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4160 spin_unlock_irq(shost->host_lock);
4161 }
4162}
4163
4164void
4165lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4166{
4167 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4168
4169 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4170 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4171 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4172 spin_lock_irq(shost->host_lock);
4173 list_del_init(&ndlp->nlp_listp);
4174 spin_unlock_irq(shost->host_lock);
4175 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4176 NLP_STE_UNUSED_NODE);
4177}
4178
4179static void
4180lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4181{
4182 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4183 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4184 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4185 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4186 NLP_STE_UNUSED_NODE);
4187}
4188
4189
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be
 * initialized. It initializes all the fields of the node object.
 * Although the reference to phba from @ndlp can be obtained indirectly
 * through its reference to @vport, a direct reference to phba is taken
 * here by @ndlp for quick access from the node itself.
 */
4202static inline void
4203lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4204 uint32_t did)
4205{
4206 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4207 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4208 init_timer(&ndlp->nlp_delayfunc);
4209 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
4210 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
4211 ndlp->nlp_DID = did;
4212 ndlp->vport = vport;
4213 ndlp->phba = vport->phba;
4214 ndlp->nlp_sid = NLP_NO_SID;
4215 kref_init(&ndlp->kref);
4216 NLP_INT_NODE_ACT(ndlp);
4217 atomic_set(&ndlp->cmd_pending, 0);
4218 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4219}
4220
4221struct lpfc_nodelist *
4222lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4223 int state)
4224{
4225 struct lpfc_hba *phba = vport->phba;
4226 uint32_t did;
4227 unsigned long flags;
4228 unsigned long *active_rrqs_xri_bitmap = NULL;
4229
4230 if (!ndlp)
4231 return NULL;
4232
4233 spin_lock_irqsave(&phba->ndlp_lock, flags);
4234
4235 if (NLP_CHK_FREE_REQ(ndlp)) {
4236 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4237 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4238 "0277 lpfc_enable_node: ndlp:x%p "
4239 "usgmap:x%x refcnt:%d\n",
4240 (void *)ndlp, ndlp->nlp_usg_map,
4241 atomic_read(&ndlp->kref.refcount));
4242 return NULL;
4243 }
4244
4245 if (NLP_CHK_NODE_ACT(ndlp)) {
4246 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4247 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4248 "0278 lpfc_enable_node: ndlp:x%p "
4249 "usgmap:x%x refcnt:%d\n",
4250 (void *)ndlp, ndlp->nlp_usg_map,
4251 atomic_read(&ndlp->kref.refcount));
4252 return NULL;
4253 }

	/* Keep the original DID */
	did = ndlp->nlp_DID;
	if (phba->sli_rev == LPFC_SLI_REV4)
		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;

	/* re-initialize ndlp except of ndlp linked list pointer */
	memset((((char *)ndlp) + sizeof(struct list_head)), 0,
		sizeof(struct lpfc_nodelist) - sizeof(struct list_head));
4263 lpfc_initialize_node(vport, ndlp, did);
4264
4265 if (phba->sli_rev == LPFC_SLI_REV4)
4266 ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
4267
4268 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4269 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4270 ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
4271 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4272 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
4273 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
4274 ndlp->nlp_flag,
4275 atomic_read(&ndlp->kref.refcount),
4276 ndlp->nlp_usg_map, ndlp);
4277 }
4278
4279
4280 if (state != NLP_STE_UNUSED_NODE)
4281 lpfc_nlp_set_state(vport, ndlp, state);
4282
4283 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4284 "node enable: did:x%x",
4285 ndlp->nlp_DID, 0, 0);
4286 return ndlp;
4287}
4288
4289void
4290lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4291{
	/*
	 * Use of lpfc_drop_node and the UNUSED list: lpfc_drop_node should
	 * be used when we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp stays marked UNUSED on the list
	 * until all other outstanding threads are done with it, so check
	 * that it is not already UNUSED before proceeding.
	 */
4299 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4300 return;
4301 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4302 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4303 lpfc_cleanup_vports_rrqs(vport, ndlp);
4304 lpfc_unreg_rpi(vport, ndlp);
4305 }
4306
4307 lpfc_nlp_put(ndlp);
4308 return;
4309}
4310

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
4314void
4315lpfc_set_disctmo(struct lpfc_vport *vport)
4316{
4317 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4318 struct lpfc_hba *phba = vport->phba;
4319 uint32_t tmo;
4320
	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be > than ELS/CT timeout
		 * FC spec states we need 3 * ratov for CT requests
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}
4330
4331
4332 if (!timer_pending(&vport->fc_disctmo)) {
4333 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4334 "set disc timer: tmo:x%x state:x%x flg:x%x",
4335 tmo, vport->port_state, vport->fc_flag);
4336 }
4337
4338 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
4339 spin_lock_irq(shost->host_lock);
4340 vport->fc_flag |= FC_DISC_TMO;
4341 spin_unlock_irq(shost->host_lock);
4342
4343
4344 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4345 "0247 Start Discovery Timer state x%x "
4346 "Data: x%x x%lx x%x x%x\n",
4347 vport->port_state, tmo,
4348 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
4349 vport->fc_adisc_cnt);
4350
4351 return;
4352}
4353

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
4357int
4358lpfc_can_disctmo(struct lpfc_vport *vport)
4359{
4360 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4361 unsigned long iflags;
4362
4363 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4364 "can disc timer: state:x%x rtry:x%x flg:x%x",
4365 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
4366
4367 /* Turn off discovery timer if it's running */
4368 if (vport->fc_flag & FC_DISC_TMO) {
4369 spin_lock_irqsave(shost->host_lock, iflags);
4370 vport->fc_flag &= ~FC_DISC_TMO;
4371 spin_unlock_irqrestore(shost->host_lock, iflags);
4372 del_timer_sync(&vport->fc_disctmo);
4373 spin_lock_irqsave(&vport->work_port_lock, iflags);
4374 vport->work_port_events &= ~WORKER_DISC_TMO;
4375 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
4376 }
4377
4378 /* Cancel Discovery Timer state <hba_state> */
4379 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4380 "0248 Cancel Discovery Timer state x%x "
4381 "Data: x%x x%x x%x\n",
4382 vport->port_state, vport->fc_flag,
4383 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
4384 return 0;
4385}
4386
4387 /*
4388  * Check specified ring for outstanding IOCB on the SLI queue
4389  * Return true if iocb matches the specified nport
4390  */
4391int
4392lpfc_check_sli_ndlp(struct lpfc_hba *phba,
4393 struct lpfc_sli_ring *pring,
4394 struct lpfc_iocbq *iocb,
4395 struct lpfc_nodelist *ndlp)
4396{
4397 struct lpfc_sli *psli = &phba->sli;
4398 IOCB_t *icmd = &iocb->iocb;
4399 struct lpfc_vport *vport = ndlp->vport;
4400
4401 if (iocb->vport != vport)
4402 return 0;
4403
4404 if (pring->ringno == LPFC_ELS_RING) {
4405 switch (icmd->ulpCommand) {
4406 case CMD_GEN_REQUEST64_CR:
4407 if (iocb->context_un.ndlp == ndlp)
4408 return 1;
4409 case CMD_ELS_REQUEST64_CR: /* fall through from CMD_GEN_REQUEST64_CR */
4410 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
4411 return 1;
4412 case CMD_XMIT_ELS_RSP64_CX: /* fall through from CMD_ELS_REQUEST64_CR */
4413 if (iocb->context1 == (uint8_t *) ndlp)
4414 return 1;
4415 }
4416 } else if (pring->ringno == psli->extra_ring) {
4417
4418 } else if (pring->ringno == psli->fcp_ring) {
4419 /* Skip match check if waiting to relogin to FCP target */
4420 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4421 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
4422 return 0;
4423 }
4424 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
4425 return 1;
4426 }
4427 } else if (pring->ringno == psli->next_ring) {
4428
4429 }
4430 return 0;
4431}
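/*
 * Summary of the matching rules implemented above (descriptive note
 * only). The ELS-ring cases intentionally fall through, so an iocb
 * references the node if any of the following hold:
 *
 *   ELS ring:   the iocb's ndlp context, remote DID, or context1 matches
 *   FCP ring:   ulpContext equals the node's registered RPI, unless the
 *               FCP target is waiting out NLP_DELAY_TMO for a relogin
 *   extra ring / next ring: never matched
 */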
4432
4433 /*
4434  * Free resources / clean up outstanding I/Os
4435  * associated with nlp_rpi in the LPFC_NODELIST entry.
4436  */
4437static int
4438lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4439{
4440 LIST_HEAD(completions);
4441 struct lpfc_sli *psli;
4442 struct lpfc_sli_ring *pring;
4443 struct lpfc_iocbq *iocb, *next_iocb;
4444 uint32_t i;
4445
4446 lpfc_fabric_abort_nport(ndlp);
4447
4448 /*
4449  * Everything that matches on txcmplq will be returned
4450  * by firmware with a no rpi error.
4451  */
4452 psli = &phba->sli;
4453 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4454 /* Now process each ring */
4455 for (i = 0; i < psli->num_rings; i++) {
4456 pring = &psli->ring[i];
4457
4458 spin_lock_irq(&phba->hbalock);
4459 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
4460 list) {
4461 /*
4462  * Check to see if the iocb matches the nport we are
4463  * looking for.
4464  */
4465 if ((lpfc_check_sli_ndlp(phba, pring, iocb,
4466 ndlp))) {
4467 /* It matches, so dequeue it and complete it
4468  * with an error */
4469 list_move_tail(&iocb->list,
4470 &completions);
4471 }
4472 }
4473 spin_unlock_irq(&phba->hbalock);
4474 }
4475 }
4476
4477 /* Cancel all the IOCBs from the completions list */
4478 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4479 IOERR_SLI_ABORTED);
4480
4481 return 0;
4482}
4483
4484 /**
4485  * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
4486  * @phba: Pointer to HBA context object.
4487  * @pmb: Pointer to mailbox object.
4488  *
4489  * This function issues an ELS LOGO to the node once its RPI has been
4490  * unregistered, then frees the mailbox object.
4491  **/
4492static void
4493lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4494{
4495 struct lpfc_vport *vport = pmb->vport;
4496 struct lpfc_nodelist *ndlp;
4497
4498 ndlp = (struct lpfc_nodelist *)(pmb->context1);
4499 if (!ndlp)
4500 return;
4501 lpfc_issue_els_logo(vport, ndlp, 0);
4502 mempool_free(pmb, phba->mbox_mem_pool);
4503}
4504
4505 /*
4506  * Free rpi associated with LPFC_NODELIST entry.
4507  * This routine is called from lpfc_cleanup_node(), when we are removing
4508  * a LPFC_NODELIST entry. It is also called if the driver initiates a
4509  * LOGO that completes successfully, and we are waiting to PLOGI back
4510  * to the remote NPort. In addition, it is called after we receive
4511  * an unsolicited ELS cmd, send back a rsp, the rsp completes and
4512  * we are waiting to PLOGI back to the remote NPort.
4513  */
4514int
4515lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4516{
4517 struct lpfc_hba *phba = vport->phba;
4518 LPFC_MBOXQ_t *mbox;
4519 int rc, acc_plogi = 1;
4520 uint16_t rpi;
4521
4522 if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4523 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
4524 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
4525 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
4526 "3366 RPI x%x needs to be "
4527 "unregistered nlp_flag x%x "
4528 "did x%x\n",
4529 ndlp->nlp_rpi, ndlp->nlp_flag,
4530 ndlp->nlp_DID);
4531 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4532 if (mbox) {
4533 /* SLI4 ports require the physical rpi value. */
4534 rpi = ndlp->nlp_rpi;
4535 if (phba->sli_rev == LPFC_SLI_REV4)
4536 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4537
4538 lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
4539 mbox->vport = vport;
4540 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
4541 mbox->context1 = ndlp;
4542 mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
4543 } else {
4544 if (phba->sli_rev == LPFC_SLI_REV4 &&
4545 (!(vport->load_flag & FC_UNLOADING)) &&
4546 (bf_get(lpfc_sli_intf_if_type,
4547 &phba->sli4_hba.sli_intf) ==
4548 LPFC_SLI_INTF_IF_TYPE_2) &&
4549 (atomic_read(&ndlp->kref.refcount) > 0)) {
4550 mbox->context1 = lpfc_nlp_get(ndlp);
4551 mbox->mbox_cmpl =
4552 lpfc_sli4_unreg_rpi_cmpl_clr;
4553 /*
4554  * accept PLOGIs after unreg_rpi_cmpl
4555  */
4556 acc_plogi = 0;
4557 } else
4558 mbox->mbox_cmpl =
4559 lpfc_sli_def_mbox_cmpl;
4560 }
4561
4562 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4563 if (rc == MBX_NOT_FINISHED) {
4564 mempool_free(mbox, phba->mbox_mem_pool);
4565 acc_plogi = 1;
4566 }
4567 }
4568 lpfc_no_rpi(phba, ndlp);
4569
4570 if (phba->sli_rev != LPFC_SLI_REV4)
4571 ndlp->nlp_rpi = 0;
4572 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
4573 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4574 if (acc_plogi)
4575 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4576 return 1;
4577 }
4578 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4579 return 0;
4580}
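/*
 * How the mailbox completion handler is chosen above, in brief: a node
 * flagged NLP_ISSUE_LOGO gets lpfc_nlp_logo_unreg so an ELS LOGO follows
 * the UNREG_LOGIN; an SLI4 IF_TYPE_2 port that is not unloading gets
 * lpfc_sli4_unreg_rpi_cmpl_clr and holds an extra node reference, with
 * acc_plogi = 0 deferring new PLOGIs until the unreg completes; every
 * other case falls back to lpfc_sli_def_mbox_cmpl.
 */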
4581
4582 /**
4583  * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
4584  * @phba: pointer to lpfc hba data structure.
4585  *
4586  * This routine is invoked to unregister all the currently registered
4587  * RPIs to the HBA.
4588  **/
4589void
4590lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
4591{
4592 struct lpfc_vport **vports;
4593 struct lpfc_nodelist *ndlp;
4594 struct Scsi_Host *shost;
4595 int i;
4596
4597 vports = lpfc_create_vport_work_array(phba);
4598 if (!vports) {
4599 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
4600 "2884 Vport array allocation failed\n");
4601 return;
4602 }
4603 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4604 shost = lpfc_shost_from_vport(vports[i]);
4605 spin_lock_irq(shost->host_lock);
4606 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4607 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4608 /* The mempool_alloc might sleep */
4609 spin_unlock_irq(shost->host_lock);
4610 lpfc_unreg_rpi(vports[i], ndlp);
4611 spin_lock_irq(shost->host_lock);
4612 }
4613 }
4614 spin_unlock_irq(shost->host_lock);
4615 }
4616 lpfc_destroy_vport_work_array(phba, vports);
4617}
4618
4619void
4620lpfc_unreg_all_rpis(struct lpfc_vport *vport)
4621{
4622 struct lpfc_hba *phba = vport->phba;
4623 LPFC_MBOXQ_t *mbox;
4624 int rc;
4625
4626 if (phba->sli_rev == LPFC_SLI_REV4) {
4627 lpfc_sli4_unreg_all_rpis(vport);
4628 return;
4629 }
4630
4631 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4632 if (mbox) {
4633 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
4634 mbox);
4635 mbox->vport = vport;
4636 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4637 mbox->context1 = NULL;
4638 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
4639 if (rc != MBX_TIMEOUT)
4640 mempool_free(mbox, phba->mbox_mem_pool);
4641
4642 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
4643 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
4644 "1836 Could not issue "
4645 "unreg_login(all_rpis) status %d\n", rc);
4646 }
4647}
4648
4649void
4650lpfc_unreg_default_rpis(struct lpfc_vport *vport)
4651{
4652 struct lpfc_hba *phba = vport->phba;
4653 LPFC_MBOXQ_t *mbox;
4654 int rc;
4655
4656 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4657 if (mbox) {
4658 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
4659 mbox);
4660 mbox->vport = vport;
4661 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4662 mbox->context1 = NULL;
4663 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
4664 if (rc != MBX_TIMEOUT)
4665 mempool_free(mbox, phba->mbox_mem_pool);
4666
4667 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
4668 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
4669 "1815 Could not issue "
4670 "unreg_did (default rpis) status %d\n",
4671 rc);
4672 }
4673}
4674
4675 /*
4676  * Free resources associated with LPFC_NODELIST entry
4677  * so it can be freed.
4678  */
4679static int
4680lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4681{
4682 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4683 struct lpfc_hba *phba = vport->phba;
4684 LPFC_MBOXQ_t *mb, *nextmb;
4685 struct lpfc_dmabuf *mp;
4686
4687 /* Cleanup node for NPort <nlp_DID> */
4688 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4689 "0900 Cleanup node for NPort x%x "
4690 "Data: x%x x%x x%x\n",
4691 ndlp->nlp_DID, ndlp->nlp_flag,
4692 ndlp->nlp_state, ndlp->nlp_rpi);
4693 if (NLP_CHK_FREE_REQ(ndlp)) {
4694 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4695 "0280 lpfc_cleanup_node: ndlp:x%p "
4696 "usgmap:x%x refcnt:%d\n",
4697 (void *)ndlp, ndlp->nlp_usg_map,
4698 atomic_read(&ndlp->kref.refcount));
4699 lpfc_dequeue_node(vport, ndlp);
4700 } else {
4701 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4702 "0281 lpfc_cleanup_node: ndlp:x%p "
4703 "usgmap:x%x refcnt:%d\n",
4704 (void *)ndlp, ndlp->nlp_usg_map,
4705 atomic_read(&ndlp->kref.refcount));
4706 lpfc_disable_node(vport, ndlp);
4707 }
4708
4709
4710
4711
4712 /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
4713 if ((mb = phba->sli.mbox_active)) {
4714 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
4715 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
4716 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
4717 mb->context2 = NULL;
4718 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4719 }
4720 }
4721
4722 spin_lock_irq(&phba->hbalock);
4723 /* Cleanup REG_LOGIN completions which are not yet processed */
4724 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
4725 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
4726 (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
4727 (ndlp != (struct lpfc_nodelist *) mb->context2))
4728 continue;
4729
4730 mb->context2 = NULL;
4731 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4732 }
4733
4734 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
4735 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
4736 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
4737 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
4738 mp = (struct lpfc_dmabuf *) (mb->context1);
4739 if (mp) {
4740 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
4741 kfree(mp);
4742 }
4743 list_del(&mb->list);
4744 mempool_free(mb, phba->mbox_mem_pool);
4745 /* We shall not invoke the lpfc_nlp_put to decrement
4746  * the ndlp reference count as we are in the process
4747  * of lpfc_nlp_release.
4748  */
4749 }
4750 }
4751 spin_unlock_irq(&phba->hbalock);
4752
4753 lpfc_els_abort(phba, ndlp);
4754
4755 spin_lock_irq(shost->host_lock);
4756 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
4757 spin_unlock_irq(shost->host_lock);
4758
4759 ndlp->nlp_last_elscmd = 0;
4760 del_timer_sync(&ndlp->nlp_delayfunc);
4761
4762 list_del_init(&ndlp->els_retry_evt.evt_listp);
4763 list_del_init(&ndlp->dev_loss_evt.evt_listp);
4764 lpfc_cleanup_vports_rrqs(vport, ndlp);
4765 lpfc_unreg_rpi(vport, ndlp);
4766
4767 return 0;
4768}
4769
4770 /*
4771  * Check to see if we can free the nlp back to the freelist.
4772  * If we are in the middle of using the nlp in the discovery state
4773  * machine, defer the free till we reach the end of the state machine.
4774  */
4775static void
4776lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4777{
4778 struct lpfc_hba *phba = vport->phba;
4779 struct lpfc_rport_data *rdata;
4780 struct fc_rport *rport;
4781 LPFC_MBOXQ_t *mbox;
4782 int rc;
4783
4784 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4785 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4786 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
4787 !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
4788 /* For this case we need to cleanup the default rpi
4789  * allocated by the firmware.
4790  */
4791 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4792 "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n",
4793 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4794 atomic_read(&ndlp->kref.refcount),
4795 ndlp->nlp_usg_map, ndlp);
4796 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
4797 != NULL) {
4798 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
4799 (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
4800 if (rc) {
4801 mempool_free(mbox, phba->mbox_mem_pool);
4802 }
4803 else {
4804 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
4805 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
4806 mbox->vport = vport;
4807 mbox->context2 = ndlp;
4808 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4809 if (rc == MBX_NOT_FINISHED) {
4810 mempool_free(mbox, phba->mbox_mem_pool);
4811 }
4812 }
4813 }
4814 }
4815 lpfc_cleanup_node(vport, ndlp);
4816
4817 /*
4818  * We can get here with a non-NULL ndlp->rport because when we
4819  * unregister a rport we don't break the rport/node linkage. So if we
4820  * do, make sure we don't leave any dangling pointers behind.
4821  */
4822 if (ndlp->rport) {
4823 /*
4824  * The extra lpfc_nlp_put dropped the ndlp reference held
4825  * for the registered rport, so clean up the rport here.
4826  */
4827 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4828 "0940 removed node x%p DID x%x "
4829 " rport not null %p\n",
4830 ndlp, ndlp->nlp_DID, ndlp->rport);
4831 rport = ndlp->rport;
4832 rdata = rport->dd_data;
4833 rdata->pnode = NULL;
4834 ndlp->rport = NULL;
4835 put_device(&rport->dev);
4836 }
4837}
4838
4839static int
4840lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4841 uint32_t did)
4842{
4843 D_ID mydid, ndlpdid, matchdid;
4844
4845 if (did == Bcast_DID)
4846 return 0;
4847
4848 /* First check for Direct match */
4849 if (ndlp->nlp_DID == did)
4850 return 1;
4851
4852 /* Next check for area/domain identically equals 0 match */
4853 mydid.un.word = vport->fc_myDID;
4854 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
4855 return 0;
4856 }
4857
4858 matchdid.un.word = did;
4859 ndlpdid.un.word = ndlp->nlp_DID;
4860 if (matchdid.un.b.id == ndlpdid.un.b.id) {
4861 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
4862 (mydid.un.b.area == matchdid.un.b.area)) {
4863 /* This code is supposed to match the ID
4864  * for a private loop device that is
4865  * connected/disconnected repeatedly, logging in and out:
4866  * on a private loop only the low (ALPA) byte of the DID
4867  * is stable, so a node whose recorded DID carries zero
4868  * domain and area bytes still matches a fabric-style DID
4869  * with the same ALPA when the topology is loop.
4870  */
4871 if ((ndlpdid.un.b.domain == 0) &&
4872 (ndlpdid.un.b.area == 0)) {
4873 if (ndlpdid.un.b.id &&
4874 vport->phba->fc_topology ==
4875 LPFC_TOPOLOGY_LOOP)
4876 return 1;
4877 }
4878 return 0;
4879 }
4880
4881 matchdid.un.word = ndlp->nlp_DID;
4882 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
4883 (mydid.un.b.area == ndlpdid.un.b.area)) {
4884 if ((matchdid.un.b.domain == 0) &&
4885 (matchdid.un.b.area == 0)) {
4886 if (matchdid.un.b.id)
4887 return 1;
4888 }
4889 }
4890 }
4891 return 0;
4892}
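/*
 * Worked example of the private-loop match above (addresses are
 * illustrative): a D_ID is domain:area:ALPA, one byte each. With
 * fc_myDID = 0x010200, a did of 0x0102EF matches a node whose recorded
 * nlp_DID is 0x0000EF when fc_topology is LPFC_TOPOLOGY_LOOP, because
 * only the ALPA byte (0xEF) is significant for a loop device whose
 * domain and area were never assigned.
 */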
4893
4894 /* Search for a nodelist entry */
4895static struct lpfc_nodelist *
4896__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
4897{
4898 struct lpfc_nodelist *ndlp;
4899 uint32_t data1;
4900
4901 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4902 if (lpfc_matchdid(vport, ndlp, did)) {
4903 data1 = (((uint32_t) ndlp->nlp_state << 24) |
4904 ((uint32_t) ndlp->nlp_xri << 16) |
4905 ((uint32_t) ndlp->nlp_type << 8) |
4906 ((uint32_t) ndlp->nlp_rpi & 0xff));
4907 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4908 "0929 FIND node DID "
4909 "Data: x%p x%x x%x x%x %p\n",
4910 ndlp, ndlp->nlp_DID,
4911 ndlp->nlp_flag, data1,
4912 ndlp->active_rrqs_xri_bitmap);
4913 return ndlp;
4914 }
4915 }
4916
4917 /* FIND node did <did> NOT FOUND */
4918 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4919 "0932 FIND node did x%x NOT FOUND.\n", did);
4920 return NULL;
4921}
4922
4923struct lpfc_nodelist *
4924lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
4925{
4926 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4927 struct lpfc_nodelist *ndlp;
4928 unsigned long iflags;
4929
4930 spin_lock_irqsave(shost->host_lock, iflags);
4931 ndlp = __lpfc_findnode_did(vport, did);
4932 spin_unlock_irqrestore(shost->host_lock, iflags);
4933 return ndlp;
4934}
4935
4936struct lpfc_nodelist *
4937lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
4938{
4939 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4940 struct lpfc_nodelist *ndlp;
4941
4942 ndlp = lpfc_findnode_did(vport, did);
4943 if (!ndlp) {
4944 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
4945 lpfc_rscn_payload_check(vport, did) == 0)
4946 return NULL;
4947 ndlp = (struct lpfc_nodelist *)
4948 mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
4949 if (!ndlp)
4950 return NULL;
4951 lpfc_nlp_init(vport, ndlp, did);
4952 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4953 spin_lock_irq(shost->host_lock);
4954 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4955 spin_unlock_irq(shost->host_lock);
4956 return ndlp;
4957 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4958 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
4959 if (!ndlp)
4960 return NULL;
4961 spin_lock_irq(shost->host_lock);
4962 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4963 spin_unlock_irq(shost->host_lock);
4964 return ndlp;
4965 }
4966
4967 if ((vport->fc_flag & FC_RSCN_MODE) &&
4968 !(vport->fc_flag & FC_NDISC_ACTIVE)) {
4969 if (lpfc_rscn_payload_check(vport, did)) {
4970 /* If we've already received a PLOGI from this NPort
4971  * we don't need to try to discover it again.
4972  */
4973 if (ndlp->nlp_flag & NLP_RCV_PLOGI)
4974 return NULL;
4975
4976 /* Since this node is marked for discovery,
4977  * delay timeout is not needed.
4978  */
4979 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4980 spin_lock_irq(shost->host_lock);
4981 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4982 spin_unlock_irq(shost->host_lock);
4983 } else
4984 ndlp = NULL;
4985 } else {
4986 /* If we've already received a PLOGI from this NPort,
4987  * or we are already in the process of discovery on it,
4988  * we don't need to try to discover it again.
4989  */
4990 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
4991 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
4992 ndlp->nlp_flag & NLP_RCV_PLOGI)
4993 return NULL;
4994 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4995 spin_lock_irq(shost->host_lock);
4996 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4997 spin_unlock_irq(shost->host_lock);
4998 }
4999 return ndlp;
5000}
5001
5002 /* Build a list of nodes to discover based on the loopmap */
5003void
5004lpfc_disc_list_loopmap(struct lpfc_vport *vport)
5005{
5006 struct lpfc_hba *phba = vport->phba;
5007 int j;
5008 uint32_t alpa, index;
5009
5010 if (!lpfc_is_link_up(phba))
5011 return;
5012
5013 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5014 return;
5015
5016 /* Check for loop map present or not */
5017 if (phba->alpa_map[0]) {
5018 for (j = 1; j <= phba->alpa_map[0]; j++) {
5019 alpa = phba->alpa_map[j];
5020 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
5021 continue;
5022 lpfc_setup_disc_node(vport, alpa);
5023 }
5024 } else {
5025 /* No alpamap, so try all alpa's */
5026 for (j = 0; j < FC_MAXLOOP; j++) {
5027 /* If cfg_scan_down is set, start from the highest
5028  * ALPA (0xef) and work down to the lowest (0x01).
5029  */
5030 if (vport->cfg_scan_down)
5031 index = j;
5032 else
5033 index = FC_MAXLOOP - j - 1;
5034 alpa = lpfcAlpaArray[index];
5035 if ((vport->fc_myDID & 0xff) == alpa)
5036 continue;
5037 lpfc_setup_disc_node(vport, alpa);
5038 }
5039 }
5040 return;
5041}
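/*
 * Illustrative walk of the loops above (example map, not real data):
 * with alpa_map[0] = 3 and alpa_map[1..3] = { 0x01, 0x02, 0xE8 }, a port
 * whose fc_myDID low byte is 0x02 sets up discovery nodes for ALPAs 0x01
 * and 0xE8 only. Without a map, all FC_MAXLOOP entries of lpfcAlpaArray
 * are tried, in the direction selected by cfg_scan_down.
 */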
5042
5043void
5044lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
5045{
5046 LPFC_MBOXQ_t *mbox;
5047 struct lpfc_sli *psli = &phba->sli;
5048 struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
5049 struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
5050 struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
5051 int rc;
5052
5053 /*
5054  * If it's not a physical port or if we already sent
5055  * clear_la, then don't send it.
5056  */
5057 if ((phba->link_state >= LPFC_CLEAR_LA) ||
5058 (vport->port_type != LPFC_PHYSICAL_PORT) ||
5059 (phba->sli_rev == LPFC_SLI_REV4))
5060 return;
5061
5062 /* Link up discovery */
5063 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
5064 phba->link_state = LPFC_CLEAR_LA;
5065 lpfc_clear_la(phba, mbox);
5066 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
5067 mbox->vport = vport;
5068 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5069 if (rc == MBX_NOT_FINISHED) {
5070 mempool_free(mbox, phba->mbox_mem_pool);
5071 lpfc_disc_flush_list(vport);
5072 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5073 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5074 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5075 phba->link_state = LPFC_HBA_ERROR;
5076 }
5077 }
5078}
5079
5080 /* Reg_vpi to tell firmware to resume normal operations */
5081void
5082lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5083{
5084 LPFC_MBOXQ_t *regvpimbox;
5085
5086 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5087 if (regvpimbox) {
5088 lpfc_reg_vpi(vport, regvpimbox);
5089 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
5090 regvpimbox->vport = vport;
5091 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
5092 == MBX_NOT_FINISHED) {
5093 mempool_free(regvpimbox, phba->mbox_mem_pool);
5094 }
5095 }
5096}
5097
5098 /* Start Link up / RSCN discovery on NPR nodes */
5099void
5100lpfc_disc_start(struct lpfc_vport *vport)
5101{
5102 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5103 struct lpfc_hba *phba = vport->phba;
5104 uint32_t num_sent;
5105 uint32_t clear_la_pending;
5106
5107 if (!lpfc_is_link_up(phba)) {
5108 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5109 "3315 Link is not up %x\n",
5110 phba->link_state);
5111 return;
5112 }
5113
5114 if (phba->link_state == LPFC_CLEAR_LA)
5115 clear_la_pending = 1;
5116 else
5117 clear_la_pending = 0;
5118
5119 if (vport->port_state < LPFC_VPORT_READY)
5120 vport->port_state = LPFC_DISC_AUTH;
5121
5122 lpfc_set_disctmo(vport);
5123
5124 vport->fc_prevDID = vport->fc_myDID;
5125 vport->num_disc_nodes = 0;
5126
5127 /* Start Discovery state <hba_state> */
5128 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5129 "0202 Start Discovery hba state x%x "
5130 "Data: x%x x%x x%x\n",
5131 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
5132 vport->fc_adisc_cnt);
5133
5134 /* First do ADISCs - if any */
5135 num_sent = lpfc_els_disc_adisc(vport);
5136
5137 if (num_sent)
5138 return;
5139
5140 /* Register the VPI for SLI3, NPIV only. */
5141 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5142 !(vport->fc_flag & FC_PT2PT) &&
5143 !(vport->fc_flag & FC_RSCN_MODE) &&
5144 (phba->sli_rev < LPFC_SLI_REV4)) {
5145 lpfc_issue_clear_la(phba, vport);
5146 lpfc_issue_reg_vpi(phba, vport);
5147 return;
5148 }
5149
5150 /*
5151  * For SLI3, cmpl_reg_vpi will set port_state to READY,
5152  * and continue discovery.
5153  */
5154 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
5155 /* If we get here, there is nothing to ADISC */
5156 lpfc_issue_clear_la(phba, vport);
5157
5158 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
5159 vport->num_disc_nodes = 0;
5160 /* go thru NPR nodes and issue ELS PLOGIs */
5161 if (vport->fc_npr_cnt)
5162 lpfc_els_disc_plogi(vport);
5163
5164 if (!vport->num_disc_nodes) {
5165 spin_lock_irq(shost->host_lock);
5166 vport->fc_flag &= ~FC_NDISC_ACTIVE;
5167 spin_unlock_irq(shost->host_lock);
5168 lpfc_can_disctmo(vport);
5169 }
5170 }
5171 vport->port_state = LPFC_VPORT_READY;
5172 } else {
5173 /* Next do PLOGIs - if any */
5174 num_sent = lpfc_els_disc_plogi(vport);
5175
5176 if (num_sent)
5177 return;
5178
5179 if (vport->fc_flag & FC_RSCN_MODE) {
5180 /* Check to see if more RSCNs came in while we
5181  * were processing this one.
5182  */
5183 if ((vport->fc_rscn_id_cnt == 0) &&
5184 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
5185 spin_lock_irq(shost->host_lock);
5186 vport->fc_flag &= ~FC_RSCN_MODE;
5187 spin_unlock_irq(shost->host_lock);
5188 lpfc_can_disctmo(vport);
5189 } else
5190 lpfc_els_handle_rscn(vport);
5191 }
5192 }
5193 return;
5194}
5195
5196 /*
5197  * Ignore completion for all IOCBs on the tx and txcmpl queues of the
5198  * ELS ring that match the specified nodelist.
5199  */
5200static void
5201lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5202{
5203 LIST_HEAD(completions);
5204 struct lpfc_sli *psli;
5205 IOCB_t *icmd;
5206 struct lpfc_iocbq *iocb, *next_iocb;
5207 struct lpfc_sli_ring *pring;
5208
5209 psli = &phba->sli;
5210 pring = &psli->ring[LPFC_ELS_RING];
5211
5212 /* Error matching iocb on txq or txcmplq
5213  * First check the txq.
5214  */
5215 spin_lock_irq(&phba->hbalock);
5216 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
5217 if (iocb->context1 != ndlp) {
5218 continue;
5219 }
5220 icmd = &iocb->iocb;
5221 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
5222 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
5223
5224 list_move_tail(&iocb->list, &completions);
5225 }
5226 }
5227
5228 /* Next check the txcmplq */
5229 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
5230 if (iocb->context1 != ndlp) {
5231 continue;
5232 }
5233 icmd = &iocb->iocb;
5234 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
5235 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
5236 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
5237 }
5238 }
5239 spin_unlock_irq(&phba->hbalock);
5240
5241 /* Cancel all the IOCBs from the completions list */
5242 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5243 IOERR_SLI_ABORTED);
5244}
5245
5246static void
5247lpfc_disc_flush_list(struct lpfc_vport *vport)
5248{
5249 struct lpfc_nodelist *ndlp, *next_ndlp;
5250 struct lpfc_hba *phba = vport->phba;
5251
5252 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
5253 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5254 nlp_listp) {
5255 if (!NLP_CHK_NODE_ACT(ndlp))
5256 continue;
5257 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5258 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
5259 lpfc_free_tx(phba, ndlp);
5260 }
5261 }
5262 }
5263}
5264
5265void
5266lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
5267{
5268 lpfc_els_flush_rscn(vport);
5269 lpfc_els_flush_cmd(vport);
5270 lpfc_disc_flush_list(vport);
5271}
5272
5273 /*****************************************************************************/
5274 /*
5275  * NAME:     lpfc_disc_timeout
5276  *
5277  * FUNCTION: Fibre Channel driver discovery timeout routine.
5278  *
5279  * EXECUTION ENVIRONMENT: interrupt only
5280  *
5281  * CALLED FROM:
5282  *      Timer function
5283  *
5284  * RETURNS:
5285  *      none
5286  */
5287 /*****************************************************************************/
5288void
5289lpfc_disc_timeout(unsigned long ptr)
5290{
5291 struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
5292 struct lpfc_hba *phba = vport->phba;
5293 uint32_t tmo_posted;
5294 unsigned long flags = 0;
5295
5296 if (unlikely(!phba))
5297 return;
5298
5299 spin_lock_irqsave(&vport->work_port_lock, flags);
5300 tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
5301 if (!tmo_posted)
5302 vport->work_port_events |= WORKER_DISC_TMO;
5303 spin_unlock_irqrestore(&vport->work_port_lock, flags);
5304
5305 if (!tmo_posted)
5306 lpfc_worker_wake_up(phba);
5307 return;
5308}
5309
5310static void
5311lpfc_disc_timeout_handler(struct lpfc_vport *vport)
5312{
5313 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5314 struct lpfc_hba *phba = vport->phba;
5315 struct lpfc_sli *psli = &phba->sli;
5316 struct lpfc_nodelist *ndlp, *next_ndlp;
5317 LPFC_MBOXQ_t *initlinkmbox;
5318 int rc, clrlaerr = 0;
5319
5320 if (!(vport->fc_flag & FC_DISC_TMO))
5321 return;
5322
5323 spin_lock_irq(shost->host_lock);
5324 vport->fc_flag &= ~FC_DISC_TMO;
5325 spin_unlock_irq(shost->host_lock);
5326
5327 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5328 "disc timeout: state:x%x rtry:x%x flg:x%x",
5329 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5330
5331 switch (vport->port_state) {
5332
5333 case LPFC_LOCAL_CFG_LINK:
5334 /* port_state is identically LPFC_LOCAL_CFG_LINK while
5335  * waiting for FAN
5336  */
5337 /* FAN timeout */
5338 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
5339 "0221 FAN timeout\n");
5340 /* Start discovery by sending FLOGI, clean up old rpis */
5341 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5342 nlp_listp) {
5343 if (!NLP_CHK_NODE_ACT(ndlp))
5344 continue;
5345 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
5346 continue;
5347 if (ndlp->nlp_type & NLP_FABRIC) {
5348 /* Clean up the ndlp on Fabric connections */
5349 lpfc_drop_node(vport, ndlp);
5350
5351 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
5352 /* Fail outstanding IO now since device
5353  * is marked for PLOGI.
5354  */
5355 lpfc_unreg_rpi(vport, ndlp);
5356 }
5357 }
5358 if (vport->port_state != LPFC_FLOGI) {
5359 if (phba->sli_rev <= LPFC_SLI_REV3)
5360 lpfc_initial_flogi(vport);
5361 else
5362 lpfc_issue_init_vfi(vport);
5363 return;
5364 }
5365 break;
5366
5367 case LPFC_FDISC:
5368 case LPFC_FLOGI:
5369 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
5370 /* Initial FLOGI timeout */
5371 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5372 "0222 Initial %s timeout\n",
5373 vport->vpi ? "FDISC" : "FLOGI");
5374
5375 /* Assume no Fabric and go on with discovery.
5376  * Check for outstanding ELS FLOGI to abort.
5377  */
5378
5379 /* FLOGI failed, so just use loop map to make discovery list */
5380 lpfc_disc_list_loopmap(vport);
5381
5382 /* Start discovery */
5383 lpfc_disc_start(vport);
5384 break;
5385
5386 case LPFC_FABRIC_CFG_LINK:
5387 /* hba_state is identically LPFC_FABRIC_CFG_LINK while
5388  * waiting for NameServer login */
5389 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5390 "0223 Timeout while waiting for "
5391 "NameServer login\n");
5392
5393 ndlp = lpfc_findnode_did(vport, NameServer_DID);
5394 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5395 lpfc_els_abort(phba, ndlp);
5396
5397 /* ReStart discovery */
5398 goto restart_disc;
5399
5400 case LPFC_NS_QRY:
5401 /* Check for wait for NameServer Rsp timeout */
5402 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5403 "0224 NameServer Query timeout "
5404 "Data: x%x x%x\n",
5405 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5406
5407 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
5408 /* Try it one more time */
5409 vport->fc_ns_retry++;
5410 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
5411 vport->fc_ns_retry, 0);
5412 if (rc == 0)
5413 break;
5414 }
5415 vport->fc_ns_retry = 0;
5416
5417restart_disc:
5418 /*
5419  * Discovery is over.
5420  * set port_state to PORT_READY if SLI2 or SLI3.
5421  */
5422
5423 if (phba->sli_rev < LPFC_SLI_REV4) {
5424 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5425 lpfc_issue_reg_vpi(phba, vport);
5426 else {
5427 lpfc_issue_clear_la(phba, vport);
5428 vport->port_state = LPFC_VPORT_READY;
5429 }
5430 }
5431
5432 /* Setup and issue mailbox INITIALIZE LINK command */
5433 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5434 if (!initlinkmbox) {
5435 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5436 "0206 Device Discovery "
5437 "completion error\n");
5438 phba->link_state = LPFC_HBA_ERROR;
5439 break;
5440 }
5441
5442 lpfc_linkdown(phba);
5443 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
5444 phba->cfg_link_speed);
5445 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
5446 initlinkmbox->vport = vport;
5447 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5448 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
5449 lpfc_set_loopback_flag(phba);
5450 if (rc == MBX_NOT_FINISHED)
5451 mempool_free(initlinkmbox, phba->mbox_mem_pool);
5452
5453 break;
5454
5455 case LPFC_DISC_AUTH:
5456 /* Node Authentication timeout */
5457 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5458 "0227 Node Authentication timeout\n");
5459 lpfc_disc_flush_list(vport);
5460
5461
5462 /*
5463  * set port_state to PORT_READY if SLI2 or SLI3.
5464  */
5465 if (phba->sli_rev < LPFC_SLI_REV4) {
5466 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5467 lpfc_issue_reg_vpi(phba, vport);
5468 else {
5469 lpfc_issue_clear_la(phba, vport);
5470 vport->port_state = LPFC_VPORT_READY;
5471 }
5472 }
5473 break;
5474
5475 case LPFC_VPORT_READY:
5476 if (vport->fc_flag & FC_RSCN_MODE) {
5477 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5478 "0231 RSCN timeout Data: x%x "
5479 "x%x\n",
5480 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5481
5482 /* Cleanup any outstanding ELS commands */
5483 lpfc_els_flush_cmd(vport);
5484
5485 lpfc_els_flush_rscn(vport);
5486 lpfc_disc_flush_list(vport);
5487 }
5488 break;
5489
5490 default:
5491 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5492 "0273 Unexpected discovery timeout, "
5493 "vport State x%x\n", vport->port_state);
5494 break;
5495 }
5496
5497 switch (phba->link_state) {
5498 case LPFC_CLEAR_LA:
5499 /* CLEAR LA timeout */
5500 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5501 "0228 CLEAR LA timeout\n");
5502 clrlaerr = 1;
5503 break;
5504
5505 case LPFC_LINK_UP:
5506 lpfc_issue_clear_la(phba, vport);
5507 /* Drop thru */
5508 case LPFC_LINK_UNKNOWN:
5509 case LPFC_WARM_START:
5510 case LPFC_INIT_START:
5511 case LPFC_INIT_MBX_CMDS:
5512 case LPFC_LINK_DOWN:
5513 case LPFC_HBA_ERROR:
5514 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5515 "0230 Unexpected timeout, hba link "
5516 "state x%x\n", phba->link_state);
5517 clrlaerr = 1;
5518 break;
5519
5520 case LPFC_HBA_READY:
5521 break;
5522 }
5523
5524 if (clrlaerr) {
5525 lpfc_disc_flush_list(vport);
5526 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
5527 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
5528 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
5529 vport->port_state = LPFC_VPORT_READY;
5530 }
5531
5532 return;
5533}
5534
5535 /*
5536  * This routine handles processing a FDMI REG_LOGIN mailbox
5537  * command upon completion. It is setup in the LPFC_MBOXQ
5538  * as the completion routine when the command is
5539  * handed off to the SLI layer.
5540  */
5541void
5542lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5543{
5544 MAILBOX_t *mb = &pmb->u.mb;
5545 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
5546 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5547 struct lpfc_vport *vport = pmb->vport;
5548
5549 pmb->context1 = NULL;
5550 pmb->context2 = NULL;
5551
5552 if (phba->sli_rev < LPFC_SLI_REV4)
5553 ndlp->nlp_rpi = mb->un.varWords[0];
5554 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
5555 ndlp->nlp_type |= NLP_FABRIC;
5556 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
5557 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5558 "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n",
5559 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5560 atomic_read(&ndlp->kref.refcount),
5561 ndlp->nlp_usg_map, ndlp);
5562 /*
5563  * Start issuing Fabric-Device Management Interface (FDMI) command to
5564  * 0xfffffa (FDMI well known port).
5565  * DHBA -> DPRT -> RHBA -> RPA  (physical port)
5566  * DPRT -> RPRT (vports)
5567  */
5568 if (vport->port_type == LPFC_PHYSICAL_PORT)
5569 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
5570 else
5571 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
5572
5573
5574 /* decrement the node reference count held for this callback
5575  * function.
5576  */
5577 lpfc_nlp_put(ndlp);
5578 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5579 kfree(mp);
5580 mempool_free(pmb, phba->mbox_mem_pool);
5581
5582 return;
5583}
5584
5585static int
5586lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
5587{
5588 uint16_t *rpi = param;
5589
5590 /* check for active node */
5591 if (!NLP_CHK_NODE_ACT(ndlp))
5592 return 0;
5593
5594 return ndlp->nlp_rpi == *rpi;
5595}
5596
5597static int
5598lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
5599{
5600 return memcmp(&ndlp->nlp_portname, param,
5601 sizeof(ndlp->nlp_portname)) == 0;
5602}
5603
5604static struct lpfc_nodelist *
5605__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
5606{
5607 struct lpfc_nodelist *ndlp;
5608
5609 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5610 if (filter(ndlp, param)) {
5611 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5612 "3185 FIND node filter %p DID "
5613 "Data: x%p x%x x%x\n",
5614 filter, ndlp, ndlp->nlp_DID,
5615 ndlp->nlp_flag);
5616 return ndlp;
5617 }
5618 }
5619 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5620 "3186 FIND node filter %p NOT FOUND.\n", filter);
5621 return NULL;
5622}
5623
5624 /*
5625  * This routine looks up the ndlp lists for the given RPI. If rpi found
5626  * it returns the node list element pointer else return NULL.
5627  */
5628struct lpfc_nodelist *
5629__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
5630{
5631 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
5632}
5633
5634 /*
5635  * This routine looks up the ndlp lists for the given WWPN. If WWPN
5636  * found it returns the node element list pointer else return NULL.
5637  */
5638struct lpfc_nodelist *
5639lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
5640{
5641 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5642 struct lpfc_nodelist *ndlp;
5643
5644 spin_lock_irq(shost->host_lock);
5645 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
5646 spin_unlock_irq(shost->host_lock);
5647 return ndlp;
5648}
5649
5650 /*
5651  * This routine looks up the ndlp lists for the given RPI. If the rpi
5652  * is found, the routine returns the node element list pointer else
5653  * return NULL.
5654  */
5655struct lpfc_nodelist *
5656lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
5657{
5658 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5659 struct lpfc_nodelist *ndlp;
5660
5661 spin_lock_irq(shost->host_lock);
5662 ndlp = __lpfc_findnode_rpi(vport, rpi);
5663 spin_unlock_irq(shost->host_lock);
5664 return ndlp;
5665}
5666
5667 /**
5668  * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
5669  * @phba: pointer to lpfc hba data structure.
5670  * @vpi: the physical host virtual N_Port identifier.
5671  *
5672  * This routine finds a vport on a HBA (referred by @phba) through a
5673  * @vpi. The function walks the HBA's vport list and returns the address
5674  * of the vport with the matching @vpi.
5675  *
5676  * Return code
5677  *    NULL - No vport with the matching @vpi found
5678  *    otherwise - Address for the vport with the matching @vpi.
5679  **/
5680struct lpfc_vport *
5681lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5682{
5683 struct lpfc_vport *vport;
5684 unsigned long flags;
5685 int i = 0;
5686
5687 /* The physical ports are always vpi 0 - translation is unnecessary. */
5688 if (vpi > 0) {
5689 /*
5690  * Translate the physical vpi to the logical vpi. The
5691  * vport stores the logical vpi.
5692  */
5693 for (i = 0; i < phba->max_vpi; i++) {
5694 if (vpi == phba->vpi_ids[i])
5695 break;
5696 }
5697
5698 if (i >= phba->max_vpi) {
5699 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
5700 "2936 Could not find Vport mapped "
5701 "to vpi %d\n", vpi);
5702 return NULL;
5703 }
5704 }
5705
5706 spin_lock_irqsave(&phba->hbalock, flags);
5707 list_for_each_entry(vport, &phba->port_list, listentry) {
5708 if (vport->vpi == i) {
5709 spin_unlock_irqrestore(&phba->hbalock, flags);
5710 return vport;
5711 }
5712 }
5713 spin_unlock_irqrestore(&phba->hbalock, flags);
5714 return NULL;
5715}
5716
5717void
5718lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5719 uint32_t did)
5720{
5721 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
5722
5723 lpfc_initialize_node(vport, ndlp, did);
5724 INIT_LIST_HEAD(&ndlp->nlp_listp);
5725 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
5726 ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
5727 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5728 "0007 rpi:%x DID:%x flg:%x refcnt:%d "
5729 "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID,
5730 ndlp->nlp_flag,
5731 atomic_read(&ndlp->kref.refcount),
5732 ndlp->nlp_usg_map, ndlp);
5733
5734 ndlp->active_rrqs_xri_bitmap =
5735 mempool_alloc(vport->phba->active_rrq_pool,
5736 GFP_KERNEL);
5737 if (ndlp->active_rrqs_xri_bitmap)
5738 memset(ndlp->active_rrqs_xri_bitmap, 0,
5739 ndlp->phba->cfg_rrq_xri_bitmap_sz);
5740 }
5741
5742
5743
5744 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
5745 "node init: did:x%x",
5746 ndlp->nlp_DID, 0, 0);
5747
5748 return;
5749}
5750
5751 /* This routine releases all resources associated with a specific
5752  * NPort's ndlp and mempool_frees the nodelist.
5753  */
5754static void
5755lpfc_nlp_release(struct kref *kref)
5756{
5757 struct lpfc_hba *phba;
5758 unsigned long flags;
5759 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
5760 kref);
5761
5762 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5763 "node release: did:x%x flg:x%x type:x%x",
5764 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
5765
5766 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5767 "0279 lpfc_nlp_release: ndlp:x%p did %x "
5768 "usgmap:x%x refcnt:%d rpi:%x\n",
5769 (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
5770 atomic_read(&ndlp->kref.refcount), ndlp->nlp_rpi);
5771
5772 /* remove ndlp from action. */
5773 lpfc_nlp_remove(ndlp->vport, ndlp);
5774
5775 /* clear the ndlp active flag for all release */
5776 phba = ndlp->phba;
5777 spin_lock_irqsave(&phba->ndlp_lock, flags);
5778 NLP_CLR_NODE_ACT(ndlp);
5779 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5780 if (phba->sli_rev == LPFC_SLI_REV4)
5781 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
5782
5783 /* free ndlp memory for final ndlp release */
5784 if (NLP_CHK_FREE_REQ(ndlp)) {
5785 kfree(ndlp->lat_data);
5786 if (phba->sli_rev == LPFC_SLI_REV4)
5787 mempool_free(ndlp->active_rrqs_xri_bitmap,
5788 ndlp->phba->active_rrq_pool);
5789 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
5790 }
5791}
5792
5793 /* This routine bumps the reference count for a ndlp structure to ensure
5794  * that one discovery thread won't free a ndlp while another discovery
5795  * thread is using it.
5796  */
5797struct lpfc_nodelist *
5798lpfc_nlp_get(struct lpfc_nodelist *ndlp)
5799{
5800 struct lpfc_hba *phba;
5801 unsigned long flags;
5802
5803 if (ndlp) {
5804 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5805 "node get: did:x%x flg:x%x refcnt:x%x",
5806 ndlp->nlp_DID, ndlp->nlp_flag,
5807 atomic_read(&ndlp->kref.refcount));
5808 /* The check of ndlp usage to prevent incrementing the
5809  * ndlp reference count that is in the process of being
5810  * released.
5811  */
5812 phba = ndlp->phba;
5813 spin_lock_irqsave(&phba->ndlp_lock, flags);
5814 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
5815 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5816 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
5817 "0276 lpfc_nlp_get: ndlp:x%p "
5818 "usgmap:x%x refcnt:%d\n",
5819 (void *)ndlp, ndlp->nlp_usg_map,
5820 atomic_read(&ndlp->kref.refcount));
5821 return NULL;
5822 } else
5823 kref_get(&ndlp->kref);
5824 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5825 }
5826 return ndlp;
5827}
5828
5829 /* This routine decrements the reference count for a ndlp structure. If
5830  * the count goes to 0, this indicates the associated nodelist should be
5831  * freed. Returning 1 indicates the ndlp resource has been released; on
5832  * the other hand, returning 0 indicates the ndlp resource has not been
5833  * released yet.
5834  */
5835int
5836lpfc_nlp_put(struct lpfc_nodelist *ndlp)
5837{
5838 struct lpfc_hba *phba;
5839 unsigned long flags;
5840
5841 if (!ndlp)
5842 return 1;
5843
5844 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5845 "node put: did:x%x flg:x%x refcnt:x%x",
5846 ndlp->nlp_DID, ndlp->nlp_flag,
5847 atomic_read(&ndlp->kref.refcount));
5848 phba = ndlp->phba;
5849 spin_lock_irqsave(&phba->ndlp_lock, flags);
5850
5851 /* Check the ndlp memory free acknowledge flag to avoid the possible
5852  * race condition that kref_put got invoked again after the previous
5853  * one has already done the ndlp memory free. */
5854 if (NLP_CHK_FREE_ACK(ndlp)) {
5855 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5856 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
5857 "0274 lpfc_nlp_put: ndlp:x%p "
5858 "usgmap:x%x refcnt:%d\n",
5859 (void *)ndlp, ndlp->nlp_usg_map,
5860 atomic_read(&ndlp->kref.refcount));
5861 return 1;
5862 }
5863
5864 /* Check the ndlp inactivate request flag to avoid dropping the
5865  * reference again on an ndlp that is already being put into the
5866  * inactive state. */
5867 if (NLP_CHK_IACT_REQ(ndlp)) {
5868 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5869 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
5870 "0275 lpfc_nlp_put: ndlp:x%p "
5871 "usgmap:x%x refcnt:%d\n",
5872 (void *)ndlp, ndlp->nlp_usg_map,
5873 atomic_read(&ndlp->kref.refcount));
5874 return 1;
5875 }
5876
5877 /* For last put, mark the ndlp usage flags to make sure no
5878  * other process or thread can inactivate or free the ndlp
5879  * while this thread still references it.
5880  */
5881 if (atomic_read(&ndlp->kref.refcount) == 1) {
5882 /* Indicate ndlp is put to inactive state. */
5883 NLP_SET_IACT_REQ(ndlp);
5884 /* Acknowledge ndlp memory free has been seen. */
5885 if (NLP_CHK_FREE_REQ(ndlp))
5886 NLP_SET_FREE_ACK(ndlp);
5887 }
5888 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5889
5890 /* Note: kref_put invokes the release callback when it
5891  * decrements a reference count that was 1 and returns 1;
5892  * otherwise it simply decrements the reference count and
5893  * returns 0.
5894  */
5895 return kref_put(&ndlp->kref, lpfc_nlp_release);
5896}
5897
5898 /* This routine frees the specified nodelist if it is not in use
5899  * by any other discovery thread. It returns 1 if the ndlp has
5900  * been freed; a return value of 0 indicates the ndlp has not
5901  * yet been released.
5902  */
5903int
5904lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
5905{
5906 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5907 "node not used: did:x%x flg:x%x refcnt:x%x",
5908 ndlp->nlp_DID, ndlp->nlp_flag,
5909 atomic_read(&ndlp->kref.refcount));
5910 if (atomic_read(&ndlp->kref.refcount) == 1)
5911 if (lpfc_nlp_put(ndlp))
5912 return 1;
5913 return 0;
5914}
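/*
 * Node reference lifecycle used throughout this file (descriptive
 * sketch, not a new API):
 *
 *	lpfc_nlp_init(vport, ndlp, did);  // refcount starts at 1
 *	lpfc_nlp_get(ndlp);               // +1 per discovery thread
 *	lpfc_nlp_put(ndlp);               // -1; the last put marks the
 *	                                  // node inactive and triggers
 *	                                  // lpfc_nlp_release()
 *
 * lpfc_nlp_not_used() is the "free it only if nobody else holds a
 * reference" helper built on lpfc_nlp_put().
 */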
5915
5916 /**
5917  * lpfc_fcf_inuse - Check if FCF can be unregistered.
5918  * @phba: Pointer to hba context object.
5919  *
5920  * This function iterates through all FC nodes associated
5921  * with all vports to check if there is any node with
5922  * fc_rports associated with it. If there is an fc_rport
5923  * associated with the node, then the node is either in
5924  * discovered state or its devloss_timer is pending.
5925  */
5926static int
5927lpfc_fcf_inuse(struct lpfc_hba *phba)
5928{
5929 struct lpfc_vport **vports;
5930 int i, ret = 0;
5931 struct lpfc_nodelist *ndlp;
5932 struct Scsi_Host *shost;
5933
5934 vports = lpfc_create_vport_work_array(phba);
5935
5936 /* If driver cannot allocate memory, indicate fcf is in use */
5937 if (!vports)
5938 return 1;
5939
5940 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5941 shost = lpfc_shost_from_vport(vports[i]);
5942 spin_lock_irq(shost->host_lock);
5943 /*
5944  * If the CVL_RCVD bit is not set then we have sent the
5945  * flogi.
5946  * If dev_loss fires while we are waiting we do not want to
5947  * unreg the fcf.
5948  */
5949 if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
5950 spin_unlock_irq(shost->host_lock);
5951 ret = 1;
5952 goto out;
5953 }
5954 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5955 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
5956 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
5957 ret = 1;
5958 spin_unlock_irq(shost->host_lock);
5959 goto out;
5960 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5961 ret = 1;
5962 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
5963 "2624 RPI %x DID %x flag %x "
5964 "still logged in\n",
5965 ndlp->nlp_rpi, ndlp->nlp_DID,
5966 ndlp->nlp_flag);
5967 }
5968 }
5969 spin_unlock_irq(shost->host_lock);
5970 }
5971out:
5972 lpfc_destroy_vport_work_array(phba, vports);
5973 return ret;
5974}
5975
5976 /**
5977  * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
5978  * @phba: Pointer to hba context object.
5979  * @mboxq: Pointer to mailbox object.
5980  *
5981  * This function logs any UNREG_VFI mailbox error, clears the
5982  * FC_VFI_REGISTERED flag and frees the mailbox object.
5983  */
5983void
5984lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5985{
5986 struct lpfc_vport *vport = mboxq->vport;
5987 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5988
5989 if (mboxq->u.mb.mbxStatus) {
5990 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
5991 "2555 UNREG_VFI mbxStatus error x%x "
5992 "HBA state x%x\n",
5993 mboxq->u.mb.mbxStatus, vport->port_state);
5994 }
5995 spin_lock_irq(shost->host_lock);
5996 phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
5997 spin_unlock_irq(shost->host_lock);
5998 mempool_free(mboxq, phba->mbox_mem_pool);
5999 return;
6000}
6001
6002 /**
6003  * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
6004  * @phba: Pointer to hba context object.
6005  * @mboxq: Pointer to mailbox object.
6006  *
6007  * This function logs any UNREG_FCFI mailbox error and frees the
6008  * mailbox object.
6009  */
6009static void
6010lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6011{
6012 struct lpfc_vport *vport = mboxq->vport;
6013
6014 if (mboxq->u.mb.mbxStatus) {
6015 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
6016 "2550 UNREG_FCFI mbxStatus error x%x "
6017 "HBA state x%x\n",
6018 mboxq->u.mb.mbxStatus, vport->port_state);
6019 }
6020 mempool_free(mboxq, phba->mbox_mem_pool);
6021 return;
6022}
6023
6024 /**
6025  * lpfc_unregister_fcf_prep - Unregister fcf record preparation
6026  * @phba: Pointer to hba context object.
6027  *
6028  * This function prepares the HBA for unregistering the currently
6029  * registered FCF from the HBA. It performs unregistering, in order,
6030  * RPIs, VPIs, and the VFI.
6031  */
6032int
6033lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
6034{
6035 struct lpfc_vport **vports;
6036 struct lpfc_nodelist *ndlp;
6037 struct Scsi_Host *shost;
6038 int i = 0, rc;
6039
6040 /* Unregister RPIs */
6041 if (lpfc_fcf_inuse(phba))
6042 lpfc_unreg_hba_rpis(phba);
6043
6044 /* At this point, all discovery is aborted */
6045 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
6046
6047 /* Unregister VPIs */
6048 vports = lpfc_create_vport_work_array(phba);
6049 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
6050 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6051 /* Stop FLOGI/FDISC retries */
6052 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6053 if (ndlp)
6054 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6055 lpfc_cleanup_pending_mbox(vports[i]);
6056 if (phba->sli_rev == LPFC_SLI_REV4)
6057 lpfc_sli4_unreg_all_rpis(vports[i]);
6058 lpfc_mbx_unreg_vpi(vports[i]);
6059 shost = lpfc_shost_from_vport(vports[i]);
6060 spin_lock_irq(shost->host_lock);
6061 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6062 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
6063 spin_unlock_irq(shost->host_lock);
6064 }
6065 lpfc_destroy_vport_work_array(phba, vports);
6066 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
6067 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6068 if (ndlp)
6069 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
6070 lpfc_cleanup_pending_mbox(phba->pport);
6071 if (phba->sli_rev == LPFC_SLI_REV4)
6072 lpfc_sli4_unreg_all_rpis(phba->pport);
6073 lpfc_mbx_unreg_vpi(phba->pport);
6074 shost = lpfc_shost_from_vport(phba->pport);
6075 spin_lock_irq(shost->host_lock);
6076 phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6077 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
6078 spin_unlock_irq(shost->host_lock);
6079 }
6080
6081 /* Cleanup any outstanding ELS commands */
6082 lpfc_els_flush_all_cmd(phba);
6083
6084 /* Unregister the physical port VFI */
6085 rc = lpfc_issue_unreg_vfi(phba->pport);
6086 return rc;
6087}
6088
6089 /**
6090  * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
6091  * @phba: Pointer to hba context object.
6092  *
6093  * This function issues an UNREG_FCFI mailbox command to the HBA to
6094  * unregister the currently registered FCF record. The driver does
6095  * not reset the driver FCF usage state flags.
6096  *
6097  * Return 0 if successfully issued, non-zero otherwise.
6098  */
6099int
6100lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
6101{
6102 LPFC_MBOXQ_t *mbox;
6103 int rc;
6104
6105 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6106 if (!mbox) {
6107 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
6108 "2551 UNREG_FCFI mbox allocation failed "
6109 "HBA state x%x\n", phba->pport->port_state);
6110 return -ENOMEM;
6111 }
6112 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
6113 mbox->vport = phba->pport;
6114 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
6115 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6116
6117 if (rc == MBX_NOT_FINISHED) {
6118 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6119 "2552 Unregister FCFI command failed rc x%x "
6120 "HBA state x%x\n",
6121 rc, phba->pport->port_state);
6122 return -EINVAL;
6123 }
6124 return 0;
6125}
6126
6127 /**
6128  * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
6129  * @phba: Pointer to hba context object.
6130  *
6131  * This function unregisters the currently registered FCF. It also tries
6132  * to find another FCF for discovery by rescanning the HBA FCF table.
6133  */
6134void
6135lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
6136{
6137 int rc;
6138
6139 /* Preparation for unregistering fcf */
6140 rc = lpfc_unregister_fcf_prep(phba);
6141 if (rc) {
6142 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
6143 "2748 Failed to prepare for unregistering "
6144 "HBA's FCF record: rc=%d\n", rc);
6145 return;
6146 }
6147
6148 /* Now, unregister FCF record and reset HBA FCF state */
6149 rc = lpfc_sli4_unregister_fcf(phba);
6150 if (rc)
6151 return;
6152 /* Reset HBA FCF states after successful unregister FCF */
6153 phba->fcf.fcf_flag = 0;
6154 phba->fcf.current_rec.flag = 0;
6155
6156 /*
6157  * If the driver is not unloading, check if there is any other
6158  * FCF record that can be used for discovery.
6159  */
6160 if ((phba->pport->load_flag & FC_UNLOADING) ||
6161 (phba->link_state < LPFC_LINK_UP))
6162 return;
6163
6164 /* This is considered as the initial FCF discovery scan */
6165 spin_lock_irq(&phba->hbalock);
6166 phba->fcf.fcf_flag |= FCF_INIT_DISC;
6167 spin_unlock_irq(&phba->hbalock);
6168
6169 /* Reset FCF roundrobin bmask for new discovery */
6170 lpfc_sli4_clear_fcf_rr_bmask(phba);
6171
6172 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
6173
6174 if (rc) {
6175 spin_lock_irq(&phba->hbalock);
6176 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
6177 spin_unlock_irq(&phba->hbalock);
6178 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
6179 "2553 lpfc_unregister_unused_fcf failed "
6180 "to read FCF record HBA state x%x\n",
6181 phba->pport->port_state);
6182 }
6183}
6184
6185 /**
6186  * lpfc_unregister_fcf - Unregister the currently registered fcf record
6187  * @phba: Pointer to hba context object.
6188  *
6189  * This function unregisters the currently registered FCF. It also
6190  * clears the driver's FCF_REGISTERED state flag.
6191  */
6192void
6193lpfc_unregister_fcf(struct lpfc_hba *phba)
6194{
6195 int rc;
6196
6197 /* Preparation for unregistering fcf */
6198 rc = lpfc_unregister_fcf_prep(phba);
6199 if (rc) {
6200 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
6201 "2749 Failed to prepare for unregistering "
6202 "HBA's FCF record: rc=%d\n", rc);
6203 return;
6204 }
6205
6206 /* Now, unregister FCF record and reset HBA FCF state */
6207 rc = lpfc_sli4_unregister_fcf(phba);
6208 if (rc)
6209 return;
6210
6211 spin_lock_irq(&phba->hbalock);
6212 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
6213 spin_unlock_irq(&phba->hbalock);
6214}
6215
6216 /**
6217  * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
6218  * @phba: Pointer to hba context object.
6219  *
6220  * This function checks if there are any connected remote ports for the
6221  * FCF and, if all the devices are disconnected, unregisters the FCFI.
6222  * It also tries to use another FCF for discovery.
6223  */
6224void
6225lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
6226{
6227 /*
6228  * If the HBA is not running in FIP mode, does not support FCoE,
6229  * has no registered FCF, has FCF discovery in progress, or is
6230  * still in FLOGI, do nothing.
6231  */
6232 spin_lock_irq(&phba->hbalock);
6233 if (!(phba->hba_flag & HBA_FCOE_MODE) ||
6234 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
6235 !(phba->hba_flag & HBA_FIP_SUPPORT) ||
6236 (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
6237 (phba->pport->port_state == LPFC_FLOGI)) {
6238 spin_unlock_irq(&phba->hbalock);
6239 return;
6240 }
6241 spin_unlock_irq(&phba->hbalock);
6242
6243 if (lpfc_fcf_inuse(phba))
6244 return;
6245
6246 lpfc_unregister_fcf_rescan(phba);
6247}
6248
6249 /**
6250  * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
6251  * @phba: Pointer to hba context object.
6252  * @buff: Buffer containing the FCF connection table read from the
6253  *        config region.
6254  *
6255  * This function creates the driver data structures for the FCF
6256  * connection records.
6257  */
6257static void
6258lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
6259 uint8_t *buff)
6260{
6261 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6262 struct lpfc_fcf_conn_hdr *conn_hdr;
6263 struct lpfc_fcf_conn_rec *conn_rec;
6264 uint32_t record_count;
6265 int i;
6266
6267 /* Free the current connect table */
6268 list_for_each_entry_safe(conn_entry, next_conn_entry,
6269 &phba->fcf_conn_rec_list, list) {
6270 list_del_init(&conn_entry->list);
6271 kfree(conn_entry);
6272 }
6273
6274 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
6275 record_count = conn_hdr->length * sizeof(uint32_t) /
6276 sizeof(struct lpfc_fcf_conn_rec);
6277
6278 conn_rec = (struct lpfc_fcf_conn_rec *)
6279 (buff + sizeof(struct lpfc_fcf_conn_hdr));
6280
6281 for (i = 0; i < record_count; i++) {
6282 if (!(conn_rec[i].flags & FCFCNCT_VALID))
6283 continue;
6284 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
6285 GFP_KERNEL);
6286 if (!conn_entry) {
6287 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6288 "2566 Failed to allocate connection"
6289 " table entry\n");
6290 return;
6291 }
6292
6293 memcpy(&conn_entry->conn_rec, &conn_rec[i],
6294 sizeof(struct lpfc_fcf_conn_rec));
6295 list_add_tail(&conn_entry->list,
6296 &phba->fcf_conn_rec_list);
6297 }
6298
6299 if (!list_empty(&phba->fcf_conn_rec_list)) {
6300 i = 0;
6301 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
6302 list) {
6303 conn_rec = &conn_entry->conn_rec;
6304 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6305 "3345 FCF connection list rec[%02d]: "
6306 "flags:x%04x, vtag:x%04x, "
6307 "fabric_name:x%02x:%02x:%02x:%02x:"
6308 "%02x:%02x:%02x:%02x, "
6309 "switch_name:x%02x:%02x:%02x:%02x:"
6310 "%02x:%02x:%02x:%02x\n", i++,
6311 conn_rec->flags, conn_rec->vlan_tag,
6312 conn_rec->fabric_name[0],
6313 conn_rec->fabric_name[1],
6314 conn_rec->fabric_name[2],
6315 conn_rec->fabric_name[3],
6316 conn_rec->fabric_name[4],
6317 conn_rec->fabric_name[5],
6318 conn_rec->fabric_name[6],
6319 conn_rec->fabric_name[7],
6320 conn_rec->switch_name[0],
6321 conn_rec->switch_name[1],
6322 conn_rec->switch_name[2],
6323 conn_rec->switch_name[3],
6324 conn_rec->switch_name[4],
6325 conn_rec->switch_name[5],
6326 conn_rec->switch_name[6],
6327 conn_rec->switch_name[7]);
6328 }
6329 }
6330}
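/*
 * Record-count arithmetic above, illustrated with assumed sizes: the
 * header's length field counts 32-bit words of payload, so if a
 * connection record occupied, say, 8 words (32 bytes), a header length
 * of 24 words would yield 24 * 4 / 32 = 3 records. The real divisor is
 * whatever sizeof(struct lpfc_fcf_conn_rec) is on this build.
 */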
6331
6332 /**
6333  * lpfc_read_fcoe_param - Read FCoE parameters.
6334  * @phba: Pointer to the HBA structure.
6335  * @buff: Buffer containing the FCoE parameter data structure.
6336  *
6337  * This function updates the driver data structure with config
6338  * parameters read from config region 23.
6339  */
6340static void
6341lpfc_read_fcoe_param(struct lpfc_hba *phba,
6342 uint8_t *buff)
6343{
6344 struct lpfc_fip_param_hdr *fcoe_param_hdr;
6345 struct lpfc_fcoe_params *fcoe_param;
6346
6347 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
6348 buff;
6349 fcoe_param = (struct lpfc_fcoe_params *)
6350 (buff + sizeof(struct lpfc_fip_param_hdr));
6351
6352 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
6353 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
6354 return;
6355
6356 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
6357 phba->valid_vlan = 1;
6358 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
6359 0xFFF;
6360 }
6361
6362 phba->fc_map[0] = fcoe_param->fc_map[0];
6363 phba->fc_map[1] = fcoe_param->fc_map[1];
6364 phba->fc_map[2] = fcoe_param->fc_map[2];
6365 return;
6366}
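/*
 * VLAN tag handling above, by example: vlan_tag is little-endian and
 * only its low 12 bits carry the VLAN ID, so a raw value of 0x3064
 * yields 0x3064 & 0xFFF = 0x064, i.e. VLAN 100.
 */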
6367
6368 /**
6369  * lpfc_get_rec_conf23 - Get a record type in config region data.
6370  * @buff: Buffer containing config region 23 data.
6371  * @size: Size of the data buffer.
6372  * @rec_type: Record type to be searched.
6373  *
6374  * This function searches config region data to find the beginning
6375  * of the record specified by record_type. If found, it returns a
6376  * pointer to the record, else it returns NULL.
6377  */
6378static uint8_t *
6379lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
6380{
6381 uint32_t offset = 0, rec_length;
6382
6383 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
6384 (size < sizeof(uint32_t)))
6385 return NULL;
6386
6387 rec_length = buff[offset + 1];
6388
6389 /*
6390  * One TLV record has a one-word header plus the number of data
6391  * words given in the rec_length field of the record header.
6392  */
6393 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
6394 <= size) {
6395 if (buff[offset] == rec_type)
6396 return &buff[offset];
6397
6398 if (buff[offset] == LPFC_REGION23_LAST_REC)
6399 return NULL;
6400
6401 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
6402 rec_length = buff[offset + 1];
6403 }
6404 return NULL;
6405}
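/*
 * TLV walk above, by example (offsets are illustrative): a record whose
 * length byte is 2 occupies one header word plus two data words, i.e.
 * (2 * 4) + 4 = 12 bytes, so the scan advances offset from 0 to 12 and
 * reads the next type/length pair there, stopping at the
 * LPFC_REGION23_LAST_REC marker or at the end of the buffer.
 */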
6406
6407 /**
6408  * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
6409  * @phba: Pointer to lpfc_hba data structure.
6410  * @buff: Buffer containing config region 23 data.
6411  * @size: Size of the data buffer.
6412  *
6413  * This function parses the FCoE config parameters in config region 23
6414  * and populates the driver data structure with the parameters.
6415  */
6416void
6417lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
6418 uint8_t *buff,
6419 uint32_t size)
6420{
6421 uint32_t offset = 0;
6422 uint8_t *rec_ptr;
6423
6424 /*
6425  * If the data size is less than 2 words, the signature and
6426  * version cannot be verified.
6427  */
6428 if (size < 2*sizeof(uint32_t))
6429 return;
6430
6431 /* Check the region signature first */
6432 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
6433 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6434 "2567 Config region 23 has bad signature\n");
6435 return;
6436 }
6437
6438 offset += 4;
6439
6440 /* Check the data structure version */
6441 if (buff[offset] != LPFC_REGION23_VERSION) {
6442 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6443 "2568 Config region 23 has bad version\n");
6444 return;
6445 }
6446 offset += 4;
6447
6448 /* Read FCoE param record */
6449 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6450 size - offset, FCOE_PARAM_TYPE);
6451 if (rec_ptr)
6452 lpfc_read_fcoe_param(phba, rec_ptr);
6453
6454 /* Read FCF connection table */
6455 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6456 size - offset, FCOE_CONN_TBL_TYPE);
6457 if (rec_ptr)
6458 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
6459
6460}
6461