/*
 * Discovery, link-event, and FCF management handling for the Emulex
 * LightPulse Fibre Channel (lpfc) host bus adapter driver.
 */
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

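/* AlpaArray for assignment of scsid for scan-down and bind_method */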
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			       " to terminate I/O Data x%x\n",
			       rport->port_id);
		return;
	}

	phba = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
			      "rport terminate: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
				    &phba->sli.sli3_ring[LPFC_FCP_RING],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}

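/**
 * lpfc_dev_loss_tmo_callbk - Notified by the FC transport of rport device loss
 * @rport: Pointer to the fc_rport whose dev_loss timeout fired.
 *
 * Validates the node attached to the rport, then queues an
 * LPFC_EVT_DEV_LOSS event to the worker thread so the timeout is handled
 * outside of interrupt context. If the port is unloading, the node/rport
 * linkage is released here instead.
 */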
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	int put_node;
	int put_rport;
	unsigned long iflags;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosscb: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* If the driver is unloading, just release the node/rport
	 * linkage here; the unload path cleans up the node itself.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6790 rport name %llx dev_loss_evt pending",
				 rport->port_name);
		return;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(shost->host_lock, iflags);

	/* Hold a reference on the node for the duration of the
	 * queued work.
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return;
}

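/**
 * lpfc_dev_loss_tmo_handler - Worker-thread handler for a devloss timeout
 * @ndlp: Pointer to the fc node that timed out.
 *
 * Aborts any outstanding FCP I/O to the target, releases the node and
 * rport references, and, unless the node must be retained, runs the
 * discovery state machine with NLP_EVT_DEVICE_RM.
 *
 * Return: 1 when the FCF is still in use by other nodes (SLI4 only),
 * 0 otherwise.
 */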
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct Scsi_Host *shost;
	uint8_t *name;
	int put_node;
	int warn_on = 0;
	int fcf_inuse = 0;
	unsigned long iflags;

	rport = ndlp->rport;
	vport = ndlp->vport;
	shost = lpfc_shost_from_vport(vport);

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(shost->host_lock, iflags);

	if (!rport)
		return fcf_inuse;

	name = (uint8_t *) &ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosstmo:did:x%x type:x%x id:x%x",
			      ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	rdata = rport->dd_data;

	/* If the driver is unloading, abort any I/O still outstanding to
	 * this target and release the node/rport linkage.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					    &phba->sli.sli3_ring[LPFC_FCP_RING],
					    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		put_device(&rport->dev);

		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	put_node = rdata->pnode != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	put_device(&rport->dev);

	if (ndlp->nlp_type & NLP_FABRIC)
		return fcf_inuse;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

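/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss-timeout processing
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported by the devloss handler.
 * @nlp_did: remote node DID that timed out.
 *
 * When the last remote node using the in-use FCF has timed out, decide
 * whether to unregister the FCF and rescan the FCF table, based on the
 * FIP engine and FCF discovery state.
 */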
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* Nothing to do if the in-use FCF state was not affected. */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister the in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

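/**
 * lpfc_alloc_fast_evt - Allocate a fastpath event structure
 * @phba: Pointer to hba context object.
 *
 * Called from paths that post events from interrupt context, so the
 * allocation is GFP_ATOMIC. The outstanding event count is capped at
 * LPFC_MAX_EVT_COUNT to avoid exhausting memory during an event storm.
 */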
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are lots of fast events, do not exhaust memory */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
		      GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

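/**
 * lpfc_free_fast_evt - Free a fastpath event structure
 * @phba: Pointer to hba context object.
 * @evt: Event object to be freed.
 *
 * Counterpart of lpfc_alloc_fast_evt; decrements the outstanding fast
 * event count.
 */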
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		   struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

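/**
 * lpfc_send_fastpath_evt - Post a fastpath event to the FC transport
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * Invoked from the worker thread to translate a queued fastpath event
 * into a vendor-unique fc_host event, then frees the event structure.
 */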
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		       struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
				     work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
		fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			   (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
					  fc_get_event_number(),
					  evt_data_size,
					  evt_data,
					  LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}

static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* Drop the node reference taken when the
			 * event was queued.
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* Drop the node reference taken when the
			 * event was queued.
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}

static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if (pring && (status & HA_RXMASK ||
		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
		      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Preserve legacy behavior. */
			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
				set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			/* Driver could have abort request completed in queue
			 * when link goes down
			 */
			if (phba->link_state >= LPFC_LINK_DOWN ||
			    phba->link_flag & LS_MDS_LOOPBACK) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								 HA_RXMASK));
			}
		}
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					      (test_and_clear_bit(LPFC_DATA_READY,
								  &phba->data_flags)
					       || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}

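/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead
 * of embedding it in the IOCB.
 */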
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events
	 * will be queued to DPC for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}

void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     (ndlp->nlp_DID == NameServer_DID)))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;

		/* Notify transport of connectivity loss to trigger cleanup */
		if (phba->nvmet_support &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_nvmet_invalidate_host(phba, ndlp);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}

void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Down: state:x%x rtry:x%x flg:x%x",
			      vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}

int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	phba->defer_flogi_acc_flag = false;

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		if (phba->sli4_hba.conf_trunk) {
			phba->trunk_link.link0.state = 0;
			phba->trunk_link.link1.state = 0;
			phba->trunk_link.link2.state = 0;
			phba->trunk_link.link3.state = 0;
			phba->sli4_hba.link_state.logical_speed =
				LPFC_LINK_SPEED_UNKNOWN;
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);

			vports[i]->fc_myDID = 0;

			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				if (phba->nvmet_support)
					lpfc_nvmet_update_targetport(phba);
				else
					lpfc_nvme_update_localport(vports[i]);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clean up any firmware default rpi's */
	if (phba->sli_rev > LPFC_SLI_REV3)
		goto skip_unreg_did;

	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

 skip_unreg_did:
	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		phba->pport->rcv_flogi_cnt = 0;
		spin_unlock_irq(shost->host_lock);
	}
	return 0;
}

static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Up: top:x%x speed:x%x flg:x%x",
			      phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);
}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clear the pport flogi counter in case the link down was
	 * absorbed without an ACQE. No lock here - in worker thread
	 * and discovery is synchronized.
	 */
	spin_lock_irq(shost->host_lock);
	phba->pport->rcv_flogi_cnt = 0;
	spin_unlock_irq(shost->host_lock);

	/* reinitialize initial FLOGI flag */
	phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
	phba->defer_flogi_acc_flag = false;

	return 0;
}

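/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */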
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn off Ring Interrupts */
	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}

void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	LPFC_MBOXQ_t *sparam_mb;
	struct lpfc_dmabuf *sparam_mp;
	int rc;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for FAN. The discovery timer will be started
		 * when FAN is received.
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl.
	 */
	if (vport->port_state != LPFC_FLOGI) {
		/* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
		 * bb-credit recovery is in place.
		 */
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!sparam_mb)
				goto sparam_out;

			rc = lpfc_read_sparam(phba, sparam_mb, 0);
			if (rc) {
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}
			sparam_mb->vport = vport;
			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				sparam_mp = (struct lpfc_dmabuf *)
					sparam_mb->ctx_buf;
				lpfc_mbuf_free(phba, sparam_mp->virt,
					       sparam_mp->phys);
				kfree(sparam_mp);
				sparam_mb->ctx_buf = NULL;
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}

			/* Defer the FLOGI until READ_SPARAM completes */
			phba->hba_flag |= HBA_DEFER_FLOGI;
		} else {
			lpfc_initial_flogi(vport);
		}
	} else {
		if (vport->fc_flag & FC_PT2PT)
			lpfc_disc_start(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
sparam_out:
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

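/**
 * lpfc_sli4_clear_fcf_rr_bmask - Reset the FCF roundrobin state
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * Clears the FCF roundrobin bitmask and empties the FCF priority list,
 * resetting each entry's record flags while holding the hbalock.
 */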
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;

	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}

static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
				 vport->port_state);
		goto fail_out;
	}

	/* Save the assigned FCFI returned by the firmware */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

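/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * Returns 1 if the fabric name matches the fabric name in the FCF
 * record, 0 otherwise.
 */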
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

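/**
 * lpfc_sw_name_match - Check if the fcf switch name matches
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * Returns 1 if the switch name matches the switch name in the FCF
 * record, 0 otherwise.
 */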
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

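/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * Returns 1 if the mac address matches the mac address in the FCF
 * record, 0 otherwise.
 */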
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

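/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * Updates the driver FCF priority record from the new HBA FCF record.
 * Called from the code path where the hbalock is held.
 */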
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}

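/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * Copies the FCF information from the FCF record to the driver's fcf
 * record data structure.
 */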
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

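/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * Updates the driver FCF record from the new HBA FCF record together
 * with the address mode, vlan_id, and other information. Called with
 * the hbalock held.
 */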
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	lockdep_assert_held(&phba->hbalock);

	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
				     bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
				     new_fcf_record);
}

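/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a REG_FCFI mailbox command to register the fcf with the HBA.
 * If an FCF is already registered, discovery is started directly.
 */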
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI &&
		    phba->pport->fc_flag & FC_FABRIC) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}

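/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * Compares the fcf record with the connect list obtained from the config
 * region to decide if this FCF can be used for SAN discovery. Returns 1
 * if this record can be used for SAN discovery, else returns zero. On a
 * match, @boot_flag indicates whether this FCF is used by boot bios and
 * @addr_mode indicates the addressing mode to be used for this FCF.
 */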
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, driver connects to
	 * all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record reports a vlan id, use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
			      [conn_entry->conn_rec.vlan_tag / 8] &
			      (1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If connection record does not support any addressing mode,
		 * skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
		      & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by user is not supported
		 * by FCF, allow fabric to decide the addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);
		/*
		 * If the user specified a required address mode, assign the
		 * address mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				     LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use the
		 * addr mode only if the FCF supports the addr_mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
			*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
			*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in connect list, use the vlan id
		 * in the FCF record.
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}

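/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * Checks whether any FCoE or link event arrived while the driver was
 * scanning FCF entries. If a pending event is found, the FCF scan is
 * restarted (link up) or stopped (link down) and 1 is returned;
 * otherwise returns 0.
 */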
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the Link is up and no FCoE events while in the
	 * FCF discovery, no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}

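/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * Makes a running random selection decision across a sequence of
 * @fcf_cnt eligible FCF records, giving each record equal probability
 * of being chosen.
 */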
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = 0xFFFF & prandom_u32();

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}

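/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * Parses the non-embedded READ_FCF_RECORD mailbox response, performing
 * the necessary error checking, and returns the FCF record along with
 * the next fcf index; returns NULL on error.
 */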
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status, if_type;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	lpfc_sli_pcimem_bcopy(shdr, shdr,
			      sizeof(union lpfc_sli4_cfg_shdr));
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
		    if_type == LPFC_SLI_INTF_IF_TYPE_2)
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of FCF records */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}

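/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * Logs the detailed FCF record if LOG_FIP logging is enabled.
 */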
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index : x%x\n"
			"\tFCF_Avail : x%x\n"
			"\tFCF_Valid : x%x\n"
			"\tFCF_SOL : x%x\n"
			"\tFIP_Priority : x%x\n"
			"\tMAC_Provider : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}

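/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: driver fcf record to match against.
 * @new_fcf_record: new fcf record read from the HBA.
 * @new_vlan_id: vlan id associated with the new fcf record.
 *
 * Returns true if the two records match on vlan id (unless ignored),
 * mac address, switch name, fabric name, and priority.
 */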
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}

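/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * Processes roundrobin FCF failover to the next fcf index. When there is
 * no next eligible FCF, the in-use FCF may be unregistered and the FCF
 * table rediscovered.
 *
 * Return: 0 to continue retrying flogi on the currently registered fcf;
 * 1 to stop retrying flogi on the currently registered fcf.
 */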
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;

		if (!phba->fcf.fcf_redisc_attempted) {
			lpfc_unregister_fcf(phba);

			/* Request a rediscovery of the FCF table */
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (!rc) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"3195 Rediscover FCF table\n");
				phba->fcf.fcf_redisc_attempted = 1;
				lpfc_sli4_clear_fcf_rr_bmask(phba);
			} else {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"3196 Rediscover FCF table "
						"failed. Status:x%x\n", rc);
			}
		} else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					"3197 Already rediscover FCF table "
					"attempted. No more retry\n");
		}
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}

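/**
 * lpfc_sli4_fcf_pri_list_del - Remove an fcf index from the priority list
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete.
 *
 * Removes the fcf record at @fcf_index from the roundrobin priority list
 * and adjusts the eligible FCF count, holding the hbalock.
 */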
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}

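/**
 * lpfc_sli4_set_fcf_flogi_fail - Mark an fcf index as FLOGI-failed
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update.
 *
 * Sets LPFC_FCF_FLOGI_FAILED in the fcf record so that the roundrobin
 * FCF selection logic can take it into account.
 */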
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}

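/**
 * lpfc_sli4_fcf_pri_list_add - Add an fcf index to the priority list
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add.
 * @new_fcf_record: pointer to the new fcf record.
 *
 * Inserts the fcf record into the priority list sorted by fip_priority
 * and updates the roundrobin bitmask and eligible FCF count for the
 * current highest-priority level.
 */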
2167static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
2168 uint16_t fcf_index,
2169 struct fcf_record *new_fcf_record)
2170{
2171 uint16_t current_fcf_pri;
2172 uint16_t last_index;
2173 struct lpfc_fcf_pri *fcf_pri;
2174 struct lpfc_fcf_pri *next_fcf_pri;
2175 struct lpfc_fcf_pri *new_fcf_pri;
2176 int ret;
2177
2178 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2179 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2180 "3059 adding idx x%x pri x%x flg x%x\n",
2181 fcf_index, new_fcf_record->fip_priority,
2182 new_fcf_pri->fcf_rec.flag);
2183 spin_lock_irq(&phba->hbalock);
2184 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2185 list_del_init(&new_fcf_pri->list);
2186 new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2187 new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2188 if (list_empty(&phba->fcf.fcf_pri_list)) {
2189 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2190 ret = lpfc_sli4_fcf_rr_index_set(phba,
2191 new_fcf_pri->fcf_rec.fcf_index);
2192 goto out;
2193 }
2194
2195 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2196 LPFC_SLI4_FCF_TBL_INDX_MAX);
2197 if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2198 ret = 0;
2199 goto out;
2200 }
2201 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2202 if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
2203 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2204 if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
2205 memset(phba->fcf.fcf_rr_bmask, 0,
2206 sizeof(*phba->fcf.fcf_rr_bmask));
2207
2208 phba->fcf.eligible_fcf_cnt = 1;
2209 } else
2210
2211 phba->fcf.eligible_fcf_cnt++;
2212 ret = lpfc_sli4_fcf_rr_index_set(phba,
2213 new_fcf_pri->fcf_rec.fcf_index);
2214 goto out;
2215 }
2216
2217 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2218 &phba->fcf.fcf_pri_list, list) {
2219 if (new_fcf_pri->fcf_rec.priority <=
2220 fcf_pri->fcf_rec.priority) {
2221 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2222 list_add(&new_fcf_pri->list,
2223 &phba->fcf.fcf_pri_list);
2224 else
2225 list_add(&new_fcf_pri->list,
2226 &((struct lpfc_fcf_pri *)
2227 fcf_pri->list.prev)->list);
2228 ret = 0;
2229 goto out;
2230 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2231 || new_fcf_pri->fcf_rec.priority <
2232 next_fcf_pri->fcf_rec.priority) {
2233 list_add(&new_fcf_pri->list, &fcf_pri->list);
2234 ret = 0;
2235 goto out;
2236 }
2237 if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2238 continue;
2239
2240 }
2241 ret = 1;
2242out:
2243
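 /* Plain assignment (rather than |=) also clears any stale
  * LPFC_FCF_FLOGI_FAILED flag while putting the record on the list.
  */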
2244 new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2245 spin_unlock_irq(&phba->hbalock);
2246 return ret;
2247}
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
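/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - READ_FCF_RECORD completion for scan
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * Completion handler for the READ_FCF_RECORD mailbox command issued during
 * an FCF table scan. The new record is checked against the connection list;
 * ineligible records are dropped from the priority list, while eligible
 * ones are compared against the in-use or failover FCF record and may
 * replace it based on boot flag, FIP priority, or random selection among
 * equal priorities. When the end of the table is reached, the scan either
 * registers the chosen FCF or, in fast failover mode, swaps in the
 * failover candidate.
 **/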
2264void
2265lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2266{
2267 struct fcf_record *new_fcf_record;
2268 uint32_t boot_flag, addr_mode;
2269 uint16_t fcf_index, next_fcf_index;
2270 struct lpfc_fcf_rec *fcf_rec = NULL;
2271 uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2272 bool select_new_fcf;
2273 int rc;
2274
2275
2276 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2277 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2278 return;
2279 }
2280
2281
2282 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2283 &next_fcf_index);
2284 if (!new_fcf_record) {
2285 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2286 "2765 Mailbox command READ_FCF_RECORD "
2287 "failed to retrieve a FCF record.\n");
2288
2289 spin_lock_irq(&phba->hbalock);
2290 phba->hba_flag &= ~FCF_TS_INPROG;
2291 spin_unlock_irq(&phba->hbalock);
2292 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2293 return;
2294 }
2295
2296
2297 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2298 &addr_mode, &vlan_id);
2299
2300
2301 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2302 next_fcf_index);
2303
2304
2305
2306
2307
2308
2309 if (!rc) {
2310 lpfc_sli4_fcf_pri_list_del(phba,
2311 bf_get(lpfc_fcf_record_fcf_index,
2312 new_fcf_record));
2313 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2314 "2781 FCF (x%x) failed connection "
2315 "list check: (x%x/x%x/%x)\n",
2316 bf_get(lpfc_fcf_record_fcf_index,
2317 new_fcf_record),
2318 bf_get(lpfc_fcf_record_fcf_avail,
2319 new_fcf_record),
2320 bf_get(lpfc_fcf_record_fcf_valid,
2321 new_fcf_record),
2322 bf_get(lpfc_fcf_record_fcf_sol,
2323 new_fcf_record));
2324 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2325 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2326 new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2327 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2328 phba->fcf.current_rec.fcf_indx) {
2329 lpfc_printf_log(phba, KERN_ERR,
2330 LOG_TRACE_EVENT,
2331 "2862 FCF (x%x) matches property "
2332 "of in-use FCF (x%x)\n",
2333 bf_get(lpfc_fcf_record_fcf_index,
2334 new_fcf_record),
2335 phba->fcf.current_rec.fcf_indx);
2336 goto read_next_fcf;
2337 }
2338
2339
2340
2341
2342
2343
2344 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2345 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2346 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2347 "2835 Invalid in-use FCF "
2348 "(x%x), enter FCF failover "
2349 "table scan.\n",
2350 phba->fcf.current_rec.fcf_indx);
2351 spin_lock_irq(&phba->hbalock);
2352 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2353 spin_unlock_irq(&phba->hbalock);
2354 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2355 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2356 LPFC_FCOE_FCF_GET_FIRST);
2357 return;
2358 }
2359 }
2360 goto read_next_fcf;
2361 } else {
2362 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2363 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2364 new_fcf_record);
2365 if (rc)
2366 goto read_next_fcf;
2367 }
2368
2369
2370
2371
2372
2373
2374
2375 spin_lock_irq(&phba->hbalock);
2376 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2377 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2378 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2379 new_fcf_record, vlan_id)) {
2380 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2381 phba->fcf.current_rec.fcf_indx) {
2382 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2383 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2384
2385 __lpfc_sli4_stop_fcf_redisc_wait_timer(
2386 phba);
2387 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2388
2389 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2390 spin_unlock_irq(&phba->hbalock);
2391 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2392 "2836 New FCF matches in-use "
2393 "FCF (x%x), port_state:x%x, "
2394 "fc_flag:x%x\n",
2395 phba->fcf.current_rec.fcf_indx,
2396 phba->pport->port_state,
2397 phba->pport->fc_flag);
2398 goto out;
2399 } else
2400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2401 "2863 New FCF (x%x) matches "
2402 "property of in-use FCF (x%x)\n",
2403 bf_get(lpfc_fcf_record_fcf_index,
2404 new_fcf_record),
2405 phba->fcf.current_rec.fcf_indx);
2406 }
2407
2408
2409
2410
2411
2412
2413
2414 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2415 spin_unlock_irq(&phba->hbalock);
2416 goto read_next_fcf;
2417 }
2418 }
2419
2420
2421
2422
2423 if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2424 fcf_rec = &phba->fcf.failover_rec;
2425 else
2426 fcf_rec = &phba->fcf.current_rec;
2427
2428 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
2429
2430
2431
2432
2433
2434 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2435
2436 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2437 "2837 Update current FCF record "
2438 "(x%x) with new FCF record (x%x)\n",
2439 fcf_rec->fcf_indx,
2440 bf_get(lpfc_fcf_record_fcf_index,
2441 new_fcf_record));
2442 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2443 addr_mode, vlan_id, BOOT_ENABLE);
2444 spin_unlock_irq(&phba->hbalock);
2445 goto read_next_fcf;
2446 }
2447
2448
2449
2450
2451
2452 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2453 spin_unlock_irq(&phba->hbalock);
2454 goto read_next_fcf;
2455 }
2456
2457
2458
2459
2460 if (new_fcf_record->fip_priority < fcf_rec->priority) {
2461
2462 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2463 "2838 Update current FCF record "
2464 "(x%x) with new FCF record (x%x)\n",
2465 fcf_rec->fcf_indx,
2466 bf_get(lpfc_fcf_record_fcf_index,
2467 new_fcf_record));
2468 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2469 addr_mode, vlan_id, 0);
2470
2471 phba->fcf.eligible_fcf_cnt = 1;
2472 } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2473
2474 phba->fcf.eligible_fcf_cnt++;
2475 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2476 phba->fcf.eligible_fcf_cnt);
2477 if (select_new_fcf) {
2478 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2479 "2839 Update current FCF record "
2480 "(x%x) with new FCF record (x%x)\n",
2481 fcf_rec->fcf_indx,
2482 bf_get(lpfc_fcf_record_fcf_index,
2483 new_fcf_record));
2484
2485 __lpfc_update_fcf_record(phba, fcf_rec,
2486 new_fcf_record,
2487 addr_mode, vlan_id, 0);
2488 }
2489 }
2490 spin_unlock_irq(&phba->hbalock);
2491 goto read_next_fcf;
2492 }
2493
2494
2495
2496
2497 if (fcf_rec) {
2498 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2499 "2840 Update initial FCF candidate "
2500 "with FCF (x%x)\n",
2501 bf_get(lpfc_fcf_record_fcf_index,
2502 new_fcf_record));
2503 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2504 addr_mode, vlan_id, (boot_flag ?
2505 BOOT_ENABLE : 0));
2506 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2507
2508 phba->fcf.eligible_fcf_cnt = 1;
2509 }
2510 spin_unlock_irq(&phba->hbalock);
2511 goto read_next_fcf;
2512
2513read_next_fcf:
2514 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2515 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2516 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2517
2518
2519
2520
2521
2522
2523
2524
2525 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2526 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2527 "2782 No suitable FCF found: "
2528 "(x%x/x%x)\n",
2529 phba->fcoe_eventtag_at_fcf_scan,
2530 bf_get(lpfc_fcf_record_fcf_index,
2531 new_fcf_record));
2532 spin_lock_irq(&phba->hbalock);
2533 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2534 phba->hba_flag &= ~FCF_TS_INPROG;
2535 spin_unlock_irq(&phba->hbalock);
2536
2537 lpfc_printf_log(phba, KERN_INFO,
2538 LOG_FIP,
2539 "2864 On devloss tmo "
2540 "unreg in-use FCF and "
2541 "rescan FCF table\n");
2542 lpfc_unregister_fcf_rescan(phba);
2543 return;
2544 }
2545
2546
2547
2548 phba->hba_flag &= ~FCF_TS_INPROG;
2549 spin_unlock_irq(&phba->hbalock);
2550 return;
2551 }
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562 lpfc_unregister_fcf(phba);
2563
2564
2565 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2566 "2842 Replace in-use FCF (x%x) "
2567 "with failover FCF (x%x)\n",
2568 phba->fcf.current_rec.fcf_indx,
2569 phba->fcf.failover_rec.fcf_indx);
2570 memcpy(&phba->fcf.current_rec,
2571 &phba->fcf.failover_rec,
2572 sizeof(struct lpfc_fcf_rec));
2573
2574
2575
2576
2577
2578 spin_lock_irq(&phba->hbalock);
2579 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2580 spin_unlock_irq(&phba->hbalock);
2581
2582 lpfc_register_fcf(phba);
2583 } else {
2584
2585
2586
2587
2588 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2589 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2590 return;
2591
2592 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2593 phba->fcf.fcf_flag & FCF_IN_USE) {
2594
2595
2596
2597
2598
2599
2600 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2601 "2841 In-use FCF record (x%x) "
2602 "not reported, entering fast "
2603 "FCF failover mode scanning.\n",
2604 phba->fcf.current_rec.fcf_indx);
2605 spin_lock_irq(&phba->hbalock);
2606 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2607 spin_unlock_irq(&phba->hbalock);
2608 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2609 LPFC_FCOE_FCF_GET_FIRST);
2610 return;
2611 }
2612
2613 lpfc_register_fcf(phba);
2614 }
2615 } else
2616 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2617 return;
2618
2619out:
2620 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2621 lpfc_register_fcf(phba);
2622
2623 return;
2624}
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
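/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - READ_FCF_RECORD completion for
 * round-robin FLOGI failover
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * Completion handler for the READ_FCF_RECORD mailbox command issued as part
 * of FLOGI round-robin FCF failover. Ineligible records are cleared from
 * the round-robin bitmask and the next candidate is tried; an eligible
 * record replaces the in-use FCF, which is first unregistered, before the
 * new FCF is registered with the firmware.
 **/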
2641void
2642lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2643{
2644 struct fcf_record *new_fcf_record;
2645 uint32_t boot_flag, addr_mode;
2646 uint16_t next_fcf_index, fcf_index;
2647 uint16_t current_fcf_index;
2648 uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2649 int rc;
2650
2651
2652 if (phba->link_state < LPFC_LINK_UP) {
2653 spin_lock_irq(&phba->hbalock);
2654 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2655 phba->hba_flag &= ~FCF_RR_INPROG;
2656 spin_unlock_irq(&phba->hbalock);
2657 goto out;
2658 }
2659
2660
2661 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2662 &next_fcf_index);
2663 if (!new_fcf_record) {
2664 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2665 "2766 Mailbox command READ_FCF_RECORD "
2666 "failed to retrieve a FCF record. "
2667 "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2668 phba->fcf.fcf_flag);
2669 lpfc_unregister_fcf_rescan(phba);
2670 goto out;
2671 }
2672
2673
2674 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2675 &addr_mode, &vlan_id);
2676
2677
2678 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2679 next_fcf_index);
2680
2681 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2682 if (!rc) {
2683 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2684 "2848 Remove ineligible FCF (x%x) from "
2685 "from roundrobin bmask\n", fcf_index);
2686
2687 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2688
2689 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2690 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2691 if (rc)
2692 goto out;
2693 goto error_out;
2694 }
2695
2696 if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2697 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2698 "2760 Perform FLOGI roundrobin FCF failover: "
2699 "FCF (x%x) back to FCF (x%x)\n",
2700 phba->fcf.current_rec.fcf_indx, fcf_index);
2701
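 /* Rediscovery came back to the in-use FCF; wait 500 ms
  * before restarting FLOGI to it via INIT_VFI.
  */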
2702 msleep(500);
2703 lpfc_issue_init_vfi(phba->pport);
2704 goto out;
2705 }
2706
2707
2708 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2709 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2710 phba->fcf.failover_rec.fcf_indx, fcf_index);
2711 spin_lock_irq(&phba->hbalock);
2712 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2713 new_fcf_record, addr_mode, vlan_id,
2714 (boot_flag ? BOOT_ENABLE : 0));
2715 spin_unlock_irq(&phba->hbalock);
2716
2717 current_fcf_index = phba->fcf.current_rec.fcf_indx;
2718
2719
2720 lpfc_unregister_fcf(phba);
2721
2722
2723 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2724 sizeof(struct lpfc_fcf_rec));
2725
2726 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2727 "2783 Perform FLOGI roundrobin FCF failover: FCF "
2728 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2729
2730error_out:
2731 lpfc_register_fcf(phba);
2732out:
2733 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2734}
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
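/**
 * lpfc_mbx_cmpl_read_fcf_rec - READ_FCF_RECORD completion during discovery
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * Completion handler for reading a single FCF record while FCF discovery
 * is in progress. An eligible record is simply added to the FCF priority
 * list; no failover or registration decision is made here.
 **/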
2747void
2748lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2749{
2750 struct fcf_record *new_fcf_record;
2751 uint32_t boot_flag, addr_mode;
2752 uint16_t fcf_index, next_fcf_index;
2753 uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2754 int rc;
2755
2756
2757 if (phba->link_state < LPFC_LINK_UP)
2758 goto out;
2759
2760
2761 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2762 goto out;
2763
2764
2765 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2766 &next_fcf_index);
2767 if (!new_fcf_record) {
2768 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2769 "2767 Mailbox command READ_FCF_RECORD "
2770 "failed to retrieve a FCF record.\n");
2771 goto out;
2772 }
2773
2774
2775 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2776 &addr_mode, &vlan_id);
2777
2778
2779 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2780 next_fcf_index);
2781
2782 if (!rc)
2783 goto out;
2784
2785
2786 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2787
2788 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2789
2790out:
2791 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2792}
2793
2794
2795
2796
2797
2798
2799
2800
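/**
 * lpfc_init_vfi_cmpl - Completion handler for the INIT_VFI mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * On success (or the tolerated MBX_VFI_IN_USE status, or any status on
 * if_type-0 ports), kicks off the initial FLOGI for the port; otherwise the
 * vport is marked FC_VPORT_FAILED.
 **/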
2801static void
2802lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2803{
2804 struct lpfc_vport *vport = mboxq->vport;
2805
2806
2807
2808
2809
2810 if (mboxq->u.mb.mbxStatus &&
2811 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2812 LPFC_SLI_INTF_IF_TYPE_0) &&
2813 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2814 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2815 "2891 Init VFI mailbox failed 0x%x\n",
2816 mboxq->u.mb.mbxStatus);
2817 mempool_free(mboxq, phba->mbox_mem_pool);
2818 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2819 return;
2820 }
2821
2822 lpfc_initial_flogi(vport);
2823 mempool_free(mboxq, phba->mbox_mem_pool);
2824 return;
2825}
2826
2827
2828
2829
2830
2831
2832
2833
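/**
 * lpfc_issue_init_vfi - Issue an INIT_VFI mailbox command for the vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Allocates and issues an INIT_VFI mailbox command with
 * lpfc_init_vfi_cmpl() as its completion handler. Failures are logged and
 * the mailbox memory released.
 **/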
2834void
2835lpfc_issue_init_vfi(struct lpfc_vport *vport)
2836{
2837 LPFC_MBOXQ_t *mboxq;
2838 int rc;
2839 struct lpfc_hba *phba = vport->phba;
2840
2841 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2842 if (!mboxq) {
2843 lpfc_printf_vlog(vport, KERN_ERR,
2844 LOG_TRACE_EVENT, "2892 Failed to allocate "
2845 "init_vfi mailbox\n");
2846 return;
2847 }
2848 lpfc_init_vfi(mboxq, vport);
2849 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2850 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2851 if (rc == MBX_NOT_FINISHED) {
2852 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2853 "2893 Failed to issue init_vfi mailbox\n");
2854 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2855 }
2856}
2857
2858
2859
2860
2861
2862
2863
2864
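/**
 * lpfc_init_vpi_cmpl - Completion handler for the INIT_VPI mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * Clears FC_VPORT_NEEDS_INIT_VPI on success and continues discovery: the
 * vport is re-registered if a fabric node exists (physical port, or a vport
 * already in FDISC state); otherwise the initial FDISC is issued when the
 * fabric supports NPIV.
 **/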
2865void
2866lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2867{
2868 struct lpfc_vport *vport = mboxq->vport;
2869 struct lpfc_nodelist *ndlp;
2870 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2871
2872 if (mboxq->u.mb.mbxStatus) {
2873 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2874 "2609 Init VPI mailbox failed 0x%x\n",
2875 mboxq->u.mb.mbxStatus);
2876 mempool_free(mboxq, phba->mbox_mem_pool);
2877 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2878 return;
2879 }
2880 spin_lock_irq(shost->host_lock);
2881 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2882 spin_unlock_irq(shost->host_lock);
2883
2884
2885 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2886 ndlp = lpfc_findnode_did(vport, Fabric_DID);
2887 if (!ndlp)
2888 lpfc_printf_vlog(vport, KERN_ERR,
2889 LOG_TRACE_EVENT,
2890 "2731 Cannot find fabric "
2891 "controller node\n");
2892 else
2893 lpfc_register_new_vport(phba, vport, ndlp);
2894 mempool_free(mboxq, phba->mbox_mem_pool);
2895 return;
2896 }
2897
2898 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2899 lpfc_initial_fdisc(vport);
2900 else {
2901 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
2902 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2903 "2606 No NPIV Fabric support\n");
2904 }
2905 mempool_free(mboxq, phba->mbox_mem_pool);
2906 return;
2907}
2908
2909
2910
2911
2912
2913
2914
2915
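/**
 * lpfc_issue_init_vpi - Issue an INIT_VPI mailbox command for the vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Allocates a VPI for the vport if it does not already have one, then
 * issues INIT_VPI with lpfc_init_vpi_cmpl() as the completion handler.
 **/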
2916void
2917lpfc_issue_init_vpi(struct lpfc_vport *vport)
2918{
2919 LPFC_MBOXQ_t *mboxq;
2920 int rc, vpi;
2921
2922 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
2923 vpi = lpfc_alloc_vpi(vport->phba);
2924 if (!vpi) {
2925 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2926 "3303 Failed to obtain vport vpi\n");
2927 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2928 return;
2929 }
2930 vport->vpi = vpi;
2931 }
2932
2933 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2934 if (!mboxq) {
2935 lpfc_printf_vlog(vport, KERN_ERR,
2936 LOG_TRACE_EVENT, "2607 Failed to allocate "
2937 "init_vpi mailbox\n");
2938 return;
2939 }
2940 lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2941 mboxq->vport = vport;
2942 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2943 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2944 if (rc == MBX_NOT_FINISHED) {
2945 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2946 "2608 Failed to issue init_vpi mailbox\n");
2947 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2948 }
2949}
2950
2951
2952
2953
2954
2955
2956
2957
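/**
 * lpfc_start_fdiscs - Issue FDISCs for all NPIV vports
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks the vport work array and starts fabric discovery on each NPIV
 * vport: vports needing VPI initialization get an INIT_VPI first,
 * out-of-range VPIs cause the vport to be failed, loop topology marks it
 * link-down, and FDISC is issued only when the fabric supports NPIV.
 **/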
2958void
2959lpfc_start_fdiscs(struct lpfc_hba *phba)
2960{
2961 struct lpfc_vport **vports;
2962 int i;
2963
2964 vports = lpfc_create_vport_work_array(phba);
2965 if (vports != NULL) {
2966 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2967 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
2968 continue;
2969
2970 if (vports[i]->vpi > phba->max_vpi) {
2971 lpfc_vport_set_state(vports[i],
2972 FC_VPORT_FAILED);
2973 continue;
2974 }
2975 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2976 lpfc_vport_set_state(vports[i],
2977 FC_VPORT_LINKDOWN);
2978 continue;
2979 }
2980 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
2981 lpfc_issue_init_vpi(vports[i]);
2982 continue;
2983 }
2984 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2985 lpfc_initial_fdisc(vports[i]);
2986 else {
2987 lpfc_vport_set_state(vports[i],
2988 FC_VPORT_NO_FABRIC_SUPP);
2989 lpfc_printf_vlog(vports[i], KERN_ERR,
2990 LOG_TRACE_EVENT,
2991 "0259 No NPIV "
2992 "Fabric support\n");
2993 }
2994 }
2995 }
2996 lpfc_destroy_vport_work_array(phba, vports);
2997}
2998
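/**
 * lpfc_mbx_cmpl_reg_vfi - Completion handler for the REG_VFI mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 *
 * Marks the VPI/VFI registered on success and advances discovery: for
 * point-to-point or private loop the local discovery list is started, while
 * fabric-attached ports proceed to FDISCs and the name-server PLOGI. REG_VFI
 * errors fall back to loop discovery or fail the vport.
 **/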
2999void
3000lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3001{
3002 struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
3003 struct lpfc_vport *vport = mboxq->vport;
3004 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3005
3006
3007
3008
3009
3010 if (mboxq->u.mb.mbxStatus &&
3011 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3012 LPFC_SLI_INTF_IF_TYPE_0) &&
3013 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
3014 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3015 "2018 REG_VFI mbxStatus error x%x "
3016 "HBA state x%x\n",
3017 mboxq->u.mb.mbxStatus, vport->port_state);
3018 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3019
3020 lpfc_disc_list_loopmap(vport);
3021
3022 lpfc_disc_start(vport);
3023 goto out_free_mem;
3024 }
3025 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3026 goto out_free_mem;
3027 }
3028
3029
3030
3031
3032
3033 if (vport->fc_flag & FC_VFI_REGISTERED)
3034 if (!(phba->sli_rev == LPFC_SLI_REV4 &&
3035 vport->fc_flag & FC_PT2PT))
3036 goto out_free_mem;
3037
3038
3039 spin_lock_irq(shost->host_lock);
3040 vport->vpi_state |= LPFC_VPI_REGISTERED;
3041 vport->fc_flag |= FC_VFI_REGISTERED;
3042 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3043 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
3044 spin_unlock_irq(shost->host_lock);
3045
3046
3047 if ((phba->sli_rev == LPFC_SLI_REV4) &&
3048 (phba->link_flag & LS_LOOPBACK_MODE)) {
3049 phba->link_state = LPFC_HBA_READY;
3050 goto out_free_mem;
3051 }
3052
3053 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3054 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
3055 "alpacnt:%d LinkState:%x topology:%x\n",
3056 vport->port_state, vport->fc_flag, vport->fc_myDID,
3057 vport->phba->alpa_map[0],
3058 phba->link_state, phba->fc_topology);
3059
3060 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3061
3062
3063
3064
3065 if ((vport->fc_flag & FC_PT2PT) ||
3066 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
3067 !(vport->fc_flag & FC_PUBLIC_LOOP))) {
3068
3069
3070 lpfc_disc_list_loopmap(vport);
3071
3072 if (vport->fc_flag & FC_PT2PT)
3073 vport->port_state = LPFC_VPORT_READY;
3074 else
3075 lpfc_disc_start(vport);
3076 } else {
3077 lpfc_start_fdiscs(phba);
3078 lpfc_do_scr_ns_plogi(phba, vport);
3079 }
3080 }
3081
3082out_free_mem:
3083 mempool_free(mboxq, phba->mbox_mem_pool);
3084 if (dmabuf) {
3085 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
3086 kfree(dmabuf);
3087 }
3088 return;
3089}
3090
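/**
 * lpfc_mbx_cmpl_read_sparam - Completion handler for READ_SPARAM
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to mailbox object.
 *
 * Copies the service parameters returned by the adapter into the vport,
 * derives E_D_TOV/R_A_TOV from them, updates the port and node WWNs, and
 * issues the deferred FLOGI if one is pending. On mailbox error the link is
 * taken down and CLEAR_LA issued.
 **/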
3091static void
3092lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3093{
3094 MAILBOX_t *mb = &pmb->u.mb;
3095 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3096 struct lpfc_vport *vport = pmb->vport;
3097 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3098 struct serv_parm *sp = &vport->fc_sparam;
3099 uint32_t ed_tov;
3100
3101
3102 if (mb->mbxStatus) {
3103
3104 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3105 "0319 READ_SPARAM mbxStatus error x%x "
3106 "hba state x%x>\n",
3107 mb->mbxStatus, vport->port_state);
3108 lpfc_linkdown(phba);
3109 goto out;
3110 }
3111
3112 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
3113 sizeof (struct serv_parm));
3114
3115 ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
3116 if (sp->cmn.edtovResolution)
3117 ed_tov = (ed_tov + 999999) / 1000000;
3118
3119 phba->fc_edtov = ed_tov;
3120 phba->fc_ratov = (2 * ed_tov) / 1000;
3121 if (phba->fc_ratov < FF_DEF_RATOV) {
3122
3123 phba->fc_ratov = FF_DEF_RATOV;
3124 }
3125
3126 lpfc_update_vport_wwn(vport);
3127 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3128 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3129 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
3130 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
3131 }
3132
3133 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3134 kfree(mp);
3135 mempool_free(pmb, phba->mbox_mem_pool);
3136
3137
3138
3139
3140 if (phba->hba_flag & HBA_DEFER_FLOGI) {
3141 lpfc_initial_flogi(vport);
3142 phba->hba_flag &= ~HBA_DEFER_FLOGI;
3143 }
3144 return;
3145
3146out:
3147 pmb->ctx_buf = NULL;
3148 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3149 kfree(mp);
3150 lpfc_issue_clear_la(phba, vport);
3151 mempool_free(pmb, phba->mbox_mem_pool);
3152 return;
3153}
3154
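/**
 * lpfc_mbx_process_link_up - Process a link-up completion from READ_TOPOLOGY
 * @phba: pointer to lpfc hba data structure.
 * @la: pointer to the read link attention payload.
 *
 * Records link speed and topology, handles loop versus fabric specifics
 * (ALPA map logging, NPIV capability), brings the link up, and issues the
 * follow-on READ_SPARAM and CONFIG_LINK mailbox commands. In FCoE mode it
 * instead seeds the default FCF record when FIP is not in use and starts
 * the FCF table scan.
 **/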
3155static void
3156lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3157{
3158 struct lpfc_vport *vport = phba->pport;
3159 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
3160 struct Scsi_Host *shost;
3161 int i;
3162 struct lpfc_dmabuf *mp;
3163 int rc;
3164 struct fcf_record *fcf_record;
3165 uint32_t fc_flags = 0;
3166 unsigned long iflags;
3167
3168 spin_lock_irqsave(&phba->hbalock, iflags);
3169 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
3170
3171 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3172 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
3173 case LPFC_LINK_SPEED_1GHZ:
3174 case LPFC_LINK_SPEED_2GHZ:
3175 case LPFC_LINK_SPEED_4GHZ:
3176 case LPFC_LINK_SPEED_8GHZ:
3177 case LPFC_LINK_SPEED_10GHZ:
3178 case LPFC_LINK_SPEED_16GHZ:
3179 case LPFC_LINK_SPEED_32GHZ:
3180 case LPFC_LINK_SPEED_64GHZ:
3181 case LPFC_LINK_SPEED_128GHZ:
3182 break;
3183 default:
3184 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
3185 break;
3186 }
3187 }
3188
3189 if (phba->fc_topology &&
3190 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3191 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3192 "3314 Toplogy changed was 0x%x is 0x%x\n",
3193 phba->fc_topology,
3194 bf_get(lpfc_mbx_read_top_topology, la));
3195 phba->fc_topology_changed = 1;
3196 }
3197
3198 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3199 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
3200
3201 shost = lpfc_shost_from_vport(vport);
3202 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3203 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
3204
3205
3206
3207
3208 if (phba->cfg_enable_npiv && phba->max_vpi)
3209 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3210 "1309 Link Up Event npiv not supported in loop "
3211 "topology\n");
3212
3213 if (bf_get(lpfc_mbx_read_top_il, la))
3214 fc_flags |= FC_LBIT;
3215
3216 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3217 i = la->lilpBde64.tus.f.bdeSize;
3218
3219 if (i == 0) {
3220 phba->alpa_map[0] = 0;
3221 } else {
3222 if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3223 int numalpa, j, k;
3224 union {
3225 uint8_t pamap[16];
3226 struct {
3227 uint32_t wd1;
3228 uint32_t wd2;
3229 uint32_t wd3;
3230 uint32_t wd4;
3231 } pa;
3232 } un;
3233 numalpa = phba->alpa_map[0];
3234 j = 0;
3235 while (j < numalpa) {
3236 memset(un.pamap, 0, 16);
3237 for (k = 1; j < numalpa; k++) {
3238 un.pamap[k - 1] =
3239 phba->alpa_map[j + 1];
3240 j++;
3241 if (k == 16)
3242 break;
3243 }
3244
3245 lpfc_printf_log(phba,
3246 KERN_WARNING,
3247 LOG_LINK_EVENT,
3248 "1304 Link Up Event "
3249 "ALPA map Data: x%x "
3250 "x%x x%x x%x\n",
3251 un.pa.wd1, un.pa.wd2,
3252 un.pa.wd3, un.pa.wd4);
3253 }
3254 }
3255 }
3256 } else {
3257 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3258 if (phba->max_vpi && phba->cfg_enable_npiv &&
3259 (phba->sli_rev >= LPFC_SLI_REV3))
3260 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3261 }
3262 vport->fc_myDID = phba->fc_pref_DID;
3263 fc_flags |= FC_LBIT;
3264 }
3265 spin_unlock_irqrestore(&phba->hbalock, iflags);
3266
3267 if (fc_flags) {
3268 spin_lock_irqsave(shost->host_lock, iflags);
3269 vport->fc_flag |= fc_flags;
3270 spin_unlock_irqrestore(shost->host_lock, iflags);
3271 }
3272
3273 lpfc_linkup(phba);
3274
3275
3276 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3277 if (!sparam_mbox)
3278 goto out;
3279
3280 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
3281 if (rc) {
3282 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3283 goto out;
3284 }
3285 sparam_mbox->vport = vport;
3286 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
3287 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
3288 if (rc == MBX_NOT_FINISHED) {
3289 mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
3290 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3291 kfree(mp);
3292 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3293 goto out;
3294 }
3295
3296 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3297 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3298 if (!cfglink_mbox)
3299 goto out;
3300 vport->port_state = LPFC_LOCAL_CFG_LINK;
3301 lpfc_config_link(phba, cfglink_mbox);
3302 cfglink_mbox->vport = vport;
3303 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
3304 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
3305 if (rc == MBX_NOT_FINISHED) {
3306 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
3307 goto out;
3308 }
3309 } else {
3310 vport->port_state = LPFC_VPORT_UNKNOWN;
3311
3312
3313
3314
3315
3316 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
3317 fcf_record = kzalloc(sizeof(struct fcf_record),
3318 GFP_KERNEL);
3319 if (unlikely(!fcf_record)) {
3320 lpfc_printf_log(phba, KERN_ERR,
3321 LOG_TRACE_EVENT,
3322 "2554 Could not allocate memory for "
3323 "fcf record\n");
3324 rc = -ENODEV;
3325 goto out;
3326 }
3327
3328 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
3329 LPFC_FCOE_FCF_DEF_INDEX);
3330 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
3331 if (unlikely(rc)) {
3332 lpfc_printf_log(phba, KERN_ERR,
3333 LOG_TRACE_EVENT,
3334 "2013 Could not manually add FCF "
3335 "record 0, status %d\n", rc);
3336 rc = -ENODEV;
3337 kfree(fcf_record);
3338 goto out;
3339 }
3340 kfree(fcf_record);
3341 }
3342
3343
3344
3345
3346 spin_lock_irqsave(&phba->hbalock, iflags);
3347 if (phba->hba_flag & FCF_TS_INPROG) {
3348 spin_unlock_irqrestore(&phba->hbalock, iflags);
3349 return;
3350 }
3351
3352 phba->fcf.fcf_flag |= FCF_INIT_DISC;
3353 spin_unlock_irqrestore(&phba->hbalock, iflags);
3354 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3355 "2778 Start FCF table scan at linkup\n");
3356 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3357 LPFC_FCOE_FCF_GET_FIRST);
3358 if (rc) {
3359 spin_lock_irqsave(&phba->hbalock, iflags);
3360 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3361 spin_unlock_irqrestore(&phba->hbalock, iflags);
3362 goto out;
3363 }
3364
3365 lpfc_sli4_clear_fcf_rr_bmask(phba);
3366 }
3367
3368
3369 memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
3370 scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
3371 init_utsname()->nodename);
3372 return;
3373out:
3374 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3375 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3376 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
3377 vport->port_state, sparam_mbox, cfglink_mbox);
3378 lpfc_issue_clear_la(phba, vport);
3379 return;
3380}
3381
3382static void
3383lpfc_enable_la(struct lpfc_hba *phba)
3384{
3385 uint32_t control;
3386 struct lpfc_sli *psli = &phba->sli;
3387 spin_lock_irq(&phba->hbalock);
3388 psli->sli_flag |= LPFC_PROCESS_LA;
3389 if (phba->sli_rev <= LPFC_SLI_REV3) {
3390 control = readl(phba->HCregaddr);
3391 control |= HC_LAINT_ENA;
3392 writel(control, phba->HCregaddr);
3393 readl(phba->HCregaddr);
3394 }
3395 spin_unlock_irq(&phba->hbalock);
3396}
3397
3398static void
3399lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
3400{
3401 lpfc_linkdown(phba);
3402 lpfc_enable_la(phba);
3403 lpfc_unregister_unused_fcf(phba);
3404
3405}
3406
3407
3408
3409
3410
3411
3412
3413
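/**
 * lpfc_mbx_cmpl_read_topology - Completion handler for READ_TOPOLOGY
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to mailbox object.
 *
 * Completion handler for the READ_TOPOLOGY (READ_LA) mailbox command. It
 * decodes the attention type and handles link-up and link-down events
 * accordingly, including the loopback and Menlo maintenance mode special
 * cases, before releasing the mailbox resources.
 **/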
3414void
3415lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3416{
3417 struct lpfc_vport *vport = pmb->vport;
3418 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3419 struct lpfc_mbx_read_top *la;
3420 struct lpfc_sli_ring *pring;
3421 MAILBOX_t *mb = &pmb->u.mb;
3422 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3423 uint8_t attn_type;
3424 unsigned long iflags;
3425
3426
3427 pring = lpfc_phba_elsring(phba);
3428 if (pring)
3429 pring->flag &= ~LPFC_STOP_IOCB_EVENT;
3430
3431
3432 if (mb->mbxStatus) {
3433 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3434 "1307 READ_LA mbox error x%x state x%x\n",
3435 mb->mbxStatus, vport->port_state);
3436 lpfc_mbx_issue_link_down(phba);
3437 phba->link_state = LPFC_HBA_ERROR;
3438 goto lpfc_mbx_cmpl_read_topology_free_mbuf;
3439 }
3440
3441 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3442 attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
3443
3444 memcpy(&phba->alpa_map[0], mp->virt, 128);
3445
3446 spin_lock_irqsave(shost->host_lock, iflags);
3447 if (bf_get(lpfc_mbx_read_top_pb, la))
3448 vport->fc_flag |= FC_BYPASSED_MODE;
3449 else
3450 vport->fc_flag &= ~FC_BYPASSED_MODE;
3451 spin_unlock_irqrestore(shost->host_lock, iflags);
3452
3453 if (phba->fc_eventTag <= la->eventTag) {
3454 phba->fc_stat.LinkMultiEvent++;
3455 if (attn_type == LPFC_ATT_LINK_UP)
3456 if (phba->fc_eventTag != 0)
3457 lpfc_linkdown(phba);
3458 }
3459
3460 phba->fc_eventTag = la->eventTag;
3461 if (phba->sli_rev < LPFC_SLI_REV4) {
3462 spin_lock_irqsave(&phba->hbalock, iflags);
3463 if (bf_get(lpfc_mbx_read_top_mm, la))
3464 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3465 else
3466 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3467 spin_unlock_irqrestore(&phba->hbalock, iflags);
3468 }
3469
3470 phba->link_events++;
3471 if ((attn_type == LPFC_ATT_LINK_UP) &&
3472 !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
3473 phba->fc_stat.LinkUp++;
3474 if (phba->link_flag & LS_LOOPBACK_MODE) {
3475 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3476 "1306 Link Up Event in loop back mode "
3477 "x%x received Data: x%x x%x x%x x%x\n",
3478 la->eventTag, phba->fc_eventTag,
3479 bf_get(lpfc_mbx_read_top_alpa_granted,
3480 la),
3481 bf_get(lpfc_mbx_read_top_link_spd, la),
3482 phba->alpa_map[0]);
3483 } else {
3484 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3485 "1303 Link Up Event x%x received "
3486 "Data: x%x x%x x%x x%x x%x x%x %d\n",
3487 la->eventTag, phba->fc_eventTag,
3488 bf_get(lpfc_mbx_read_top_alpa_granted,
3489 la),
3490 bf_get(lpfc_mbx_read_top_link_spd, la),
3491 phba->alpa_map[0],
3492 bf_get(lpfc_mbx_read_top_mm, la),
3493 bf_get(lpfc_mbx_read_top_fa, la),
3494 phba->wait_4_mlo_maint_flg);
3495 }
3496 lpfc_mbx_process_link_up(phba, la);
3497 } else if (attn_type == LPFC_ATT_LINK_DOWN ||
3498 attn_type == LPFC_ATT_UNEXP_WWPN) {
3499 phba->fc_stat.LinkDown++;
3500 if (phba->link_flag & LS_LOOPBACK_MODE)
3501 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3502 "1308 Link Down Event in loop back mode "
3503 "x%x received "
3504 "Data: x%x x%x x%x\n",
3505 la->eventTag, phba->fc_eventTag,
3506 phba->pport->port_state, vport->fc_flag);
3507 else if (attn_type == LPFC_ATT_UNEXP_WWPN)
3508 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3509 "1313 Link Down Unexpected FA WWPN Event x%x "
3510 "received Data: x%x x%x x%x x%x x%x\n",
3511 la->eventTag, phba->fc_eventTag,
3512 phba->pport->port_state, vport->fc_flag,
3513 bf_get(lpfc_mbx_read_top_mm, la),
3514 bf_get(lpfc_mbx_read_top_fa, la));
3515 else
3516 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3517 "1305 Link Down Event x%x received "
3518 "Data: x%x x%x x%x x%x x%x\n",
3519 la->eventTag, phba->fc_eventTag,
3520 phba->pport->port_state, vport->fc_flag,
3521 bf_get(lpfc_mbx_read_top_mm, la),
3522 bf_get(lpfc_mbx_read_top_fa, la));
3523 lpfc_mbx_issue_link_down(phba);
3524 }
3525 if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
3526 attn_type == LPFC_ATT_LINK_UP) {
3527 if (phba->link_state != LPFC_LINK_DOWN) {
3528 phba->fc_stat.LinkDown++;
3529 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3530 "1312 Link Down Event x%x received "
3531 "Data: x%x x%x x%x\n",
3532 la->eventTag, phba->fc_eventTag,
3533 phba->pport->port_state, vport->fc_flag);
3534 lpfc_mbx_issue_link_down(phba);
3535 } else
3536 lpfc_enable_la(phba);
3537
3538 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3539 "1310 Menlo Maint Mode Link up Event x%x rcvd "
3540 "Data: x%x x%x x%x\n",
3541 la->eventTag, phba->fc_eventTag,
3542 phba->pport->port_state, vport->fc_flag);
3543
3544
3545
3546
3547
3548 if (phba->wait_4_mlo_maint_flg) {
3549 phba->wait_4_mlo_maint_flg = 0;
3550 wake_up_interruptible(&phba->wait_4_mlo_m_q);
3551 }
3552 }
3553
3554 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3555 bf_get(lpfc_mbx_read_top_fa, la)) {
3556 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
3557 lpfc_issue_clear_la(phba, vport);
3558 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3559 "1311 fa %d\n",
3560 bf_get(lpfc_mbx_read_top_fa, la));
3561 }
3562
3563lpfc_mbx_cmpl_read_topology_free_mbuf:
3564 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3565 kfree(mp);
3566 mempool_free(pmb, phba->mbox_mem_pool);
3567 return;
3568}
3569
3570
3571
3572
3573
3574
3575
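/**
 * lpfc_mbx_cmpl_reg_login - Completion handler for the REG_LOGIN mailbox
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to mailbox object.
 *
 * Marks the RPI registered, undoes the registration again when completion
 * raced with a node state change, and then drives the node through the
 * discovery state machine with NLP_EVT_CMPL_REG_LOGIN.
 **/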
3576void
3577lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3578{
3579 struct lpfc_vport *vport = pmb->vport;
3580 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3581 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3582 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3583
3584 pmb->ctx_buf = NULL;
3585 pmb->ctx_ndlp = NULL;
3586
3587 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3588 "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
3589 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3590 kref_read(&ndlp->kref),
3591 ndlp->nlp_usg_map, ndlp);
3592 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3593 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3594
3595 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3596 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3597
3598
3599
3600
3601
3602
3603
3604
3605 spin_lock_irq(shost->host_lock);
3606 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3607 spin_unlock_irq(shost->host_lock);
3608
3609
3610
3611
3612
3613
3614 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3615 lpfc_unreg_rpi(vport, ndlp);
3616 }
3617
3618
3619 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3620
3621 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3622 kfree(mp);
3623 mempool_free(pmb, phba->mbox_mem_pool);
3624
3625
3626
3627 lpfc_nlp_put(ndlp);
3628
3629 return;
3630}
3631
3632static void
3633lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3634{
3635 MAILBOX_t *mb = &pmb->u.mb;
3636 struct lpfc_vport *vport = pmb->vport;
3637 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3638
3639 switch (mb->mbxStatus) {
3640 case 0x0011:
3641 case 0x0020:
3642 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3643 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3644 mb->mbxStatus);
3645 break;
3646
3647 case 0x9700:
3648 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3649 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3650 vport->vpi, mb->mbxStatus);
3651 if (!(phba->pport->load_flag & FC_UNLOADING))
3652 lpfc_workq_post_event(phba, NULL, NULL,
3653 LPFC_EVT_RESET_HBA);
3654 }
3655 spin_lock_irq(shost->host_lock);
3656 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
3657 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3658 spin_unlock_irq(shost->host_lock);
3659 vport->unreg_vpi_cmpl = VPORT_OK;
3660 mempool_free(pmb, phba->mbox_mem_pool);
3661 lpfc_cleanup_vports_rrqs(vport, NULL);
3662
3663
3664
3665
3666 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
3667 scsi_host_put(shost);
3668}
3669
3670int
3671lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3672{
3673 struct lpfc_hba *phba = vport->phba;
3674 LPFC_MBOXQ_t *mbox;
3675 int rc;
3676
3677 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3678 if (!mbox)
3679 return 1;
3680
3681 lpfc_unreg_vpi(phba, vport->vpi, mbox);
3682 mbox->vport = vport;
3683 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
3684 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3685 if (rc == MBX_NOT_FINISHED) {
3686 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3687 "1800 Could not issue unreg_vpi\n");
3688 mempool_free(mbox, phba->mbox_mem_pool);
3689 vport->unreg_vpi_cmpl = VPORT_ERROR;
3690 return rc;
3691 }
3692 return 0;
3693}
3694
3695static void
3696lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3697{
3698 struct lpfc_vport *vport = pmb->vport;
3699 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3700 MAILBOX_t *mb = &pmb->u.mb;
3701
3702 switch (mb->mbxStatus) {
3703 case 0x0011:
3704 case 0x9601:
3705 case 0x9602:
3706 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3707 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3708 mb->mbxStatus);
3709 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3710 spin_lock_irq(shost->host_lock);
3711 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3712 spin_unlock_irq(shost->host_lock);
3713 vport->fc_myDID = 0;
3714
3715 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3716 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3717 if (phba->nvmet_support)
3718 lpfc_nvmet_update_targetport(phba);
3719 else
3720 lpfc_nvme_update_localport(vport);
3721 }
3722 goto out;
3723 }
3724
3725 spin_lock_irq(shost->host_lock);
3726 vport->vpi_state |= LPFC_VPI_REGISTERED;
3727 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3728 spin_unlock_irq(shost->host_lock);
3729 vport->num_disc_nodes = 0;
3730
3731 if (vport->fc_npr_cnt)
3732 lpfc_els_disc_plogi(vport);
3733
3734 if (!vport->num_disc_nodes) {
3735 spin_lock_irq(shost->host_lock);
3736 vport->fc_flag &= ~FC_NDISC_ACTIVE;
3737 spin_unlock_irq(shost->host_lock);
3738 lpfc_can_disctmo(vport);
3739 }
3740 vport->port_state = LPFC_VPORT_READY;
3741
3742out:
3743 mempool_free(pmb, phba->mbox_mem_pool);
3744 return;
3745}
3746
3747
3748
3749
3750
3751
3752
3753
3754
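/**
 * lpfc_create_static_vport - Read HBA config region and create static vports
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues DUMP mailbox commands (via lpfc_dump_static_vport()) to pull the
 * static vport information from the adapter, validates its signature and
 * revision, and creates an NPIV vport for each valid WWPN/WWNN pair found,
 * flagging each with STATIC_VPORT.
 **/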
3755void
3756lpfc_create_static_vport(struct lpfc_hba *phba)
3757{
3758 LPFC_MBOXQ_t *pmb = NULL;
3759 MAILBOX_t *mb;
3760 struct static_vport_info *vport_info;
3761 int mbx_wait_rc = 0, i;
3762 struct fc_vport_identifiers vport_id;
3763 struct fc_vport *new_fc_vport;
3764 struct Scsi_Host *shost;
3765 struct lpfc_vport *vport;
3766 uint16_t offset = 0;
3767 uint8_t *vport_buff;
3768 struct lpfc_dmabuf *mp;
3769 uint32_t byte_count = 0;
3770
3771 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3772 if (!pmb) {
3773 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3774 "0542 lpfc_create_static_vport failed to"
3775 " allocate mailbox memory\n");
3776 return;
3777 }
3778 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
3779 mb = &pmb->u.mb;
3780
3781 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
3782 if (!vport_info) {
3783 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3784 "0543 lpfc_create_static_vport failed to"
3785 " allocate vport_info\n");
3786 mempool_free(pmb, phba->mbox_mem_pool);
3787 return;
3788 }
3789
3790 vport_buff = (uint8_t *) vport_info;
3791 do {
3792
3793 if (pmb->ctx_buf) {
3794 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3795 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3796 kfree(mp);
3797 }
3798 if (lpfc_dump_static_vport(phba, pmb, offset))
3799 goto out;
3800
3801 pmb->vport = phba->pport;
3802 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
3803 LPFC_MBOX_TMO);
3804
3805 if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
3806 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3807 "0544 lpfc_create_static_vport failed to"
3808 " issue dump mailbox command ret 0x%x "
3809 "status 0x%x\n",
3810 mbx_wait_rc, mb->mbxStatus);
3811 goto out;
3812 }
3813
3814 if (phba->sli_rev == LPFC_SLI_REV4) {
3815 byte_count = pmb->u.mqe.un.mb_words[5];
3816 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3817 if (byte_count > sizeof(struct static_vport_info) -
3818 offset)
3819 byte_count = sizeof(struct static_vport_info)
3820 - offset;
3821 memcpy(vport_buff + offset, mp->virt, byte_count);
3822 offset += byte_count;
3823 } else {
3824 if (mb->un.varDmp.word_cnt >
3825 sizeof(struct static_vport_info) - offset)
3826 mb->un.varDmp.word_cnt =
3827 sizeof(struct static_vport_info)
3828 - offset;
3829 byte_count = mb->un.varDmp.word_cnt;
3830 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
3831 vport_buff + offset,
3832 byte_count);
3833
3834 offset += byte_count;
3835 }
3836
3837 } while (byte_count &&
3838 offset < sizeof(struct static_vport_info));
3839
3840
3841 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
3842 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
3843 != VPORT_INFO_REV)) {
3844 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3845 "0545 lpfc_create_static_vport bad"
3846 " information header 0x%x 0x%x\n",
3847 le32_to_cpu(vport_info->signature),
3848 le32_to_cpu(vport_info->rev) &
3849 VPORT_INFO_REV_MASK);
3850
3851 goto out;
3852 }
3853
3854 shost = lpfc_shost_from_vport(phba->pport);
3855
3856 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
3857 memset(&vport_id, 0, sizeof(vport_id));
3858 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
3859 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
3860 if (!vport_id.port_name || !vport_id.node_name)
3861 continue;
3862
3863 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
3864 vport_id.vport_type = FC_PORTTYPE_NPIV;
3865 vport_id.disable = false;
3866 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
3867
3868 if (!new_fc_vport) {
3869 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3870 "0546 lpfc_create_static_vport failed to"
3871 " create vport\n");
3872 continue;
3873 }
3874
3875 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
3876 vport->vport_flag |= STATIC_VPORT;
3877 }
3878
3879out:
3880 kfree(vport_info);
3881 if (mbx_wait_rc != MBX_TIMEOUT) {
3882 if (pmb->ctx_buf) {
3883 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3884 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3885 kfree(mp);
3886 }
3887 mempool_free(pmb, phba->mbox_mem_pool);
3888 }
3889
3890 return;
3891}
3892
3893
3894
3895
3896
3897
3898
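/**
 * lpfc_mbx_cmpl_fabric_reg_login - Completion handler for fabric REG_LOGIN
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to mailbox object.
 *
 * On failure, falls back to local loop discovery when the topology is loop,
 * otherwise fails the vport. On success, records the fabric RPI, moves the
 * fabric node to UNMAPPED, and continues with FDISCs and the name-server
 * PLOGI when the port is still configuring the fabric link.
 **/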
3899void
3900lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3901{
3902 struct lpfc_vport *vport = pmb->vport;
3903 MAILBOX_t *mb = &pmb->u.mb;
3904 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3905 struct lpfc_nodelist *ndlp;
3906 struct Scsi_Host *shost;
3907
3908 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3909 pmb->ctx_ndlp = NULL;
3910 pmb->ctx_buf = NULL;
3911
3912 if (mb->mbxStatus) {
3913 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3914 "0258 Register Fabric login error: 0x%x\n",
3915 mb->mbxStatus);
3916 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3917 kfree(mp);
3918 mempool_free(pmb, phba->mbox_mem_pool);
3919
3920 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3921
3922 lpfc_disc_list_loopmap(vport);
3923
3924
3925 lpfc_disc_start(vport);
3926
3927
3928
3929 lpfc_nlp_put(ndlp);
3930 return;
3931 }
3932
3933 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3934
3935
3936
3937 lpfc_nlp_put(ndlp);
3938 return;
3939 }
3940
3941 if (phba->sli_rev < LPFC_SLI_REV4)
3942 ndlp->nlp_rpi = mb->un.varWords[0];
3943 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3944 ndlp->nlp_type |= NLP_FABRIC;
3945 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3946
3947 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3948
3949
3950 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
3951 lpfc_start_fdiscs(phba);
3952 else {
3953 shost = lpfc_shost_from_vport(vport);
3954 spin_lock_irq(shost->host_lock);
3955 vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
3956 spin_unlock_irq(shost->host_lock);
3957 }
3958 lpfc_do_scr_ns_plogi(phba, vport);
3959 }
3960
3961 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3962 kfree(mp);
3963 mempool_free(pmb, phba->mbox_mem_pool);
3964
3965
3966
3967
3968 lpfc_nlp_put(ndlp);
3969 return;
3970}
3971
3972
3973
3974
3975
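/**
 * lpfc_issue_gidft - Issue a GID_FT to the fabric name server
 * @vport: pointer to lpfc vport data structure.
 *
 * Issues one GID_FT per configured FC-4 type (FCP and/or NVME) and counts
 * each outstanding request in vport->gidft_inp.
 *
 * Return value: the number of GID_FT requests outstanding, or 0 if none
 * could be issued.
 **/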
3976int
3977lpfc_issue_gidft(struct lpfc_vport *vport)
3978{
3979
3980 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3981 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
3982 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
3983
3984
3985
3986 lpfc_printf_vlog(vport, KERN_ERR,
3987 LOG_TRACE_EVENT,
3988 "0604 %s FC TYPE %x %s\n",
3989 "Failed to issue GID_FT to ",
3990 FC_TYPE_FCP,
3991 "Finishing discovery.");
3992 return 0;
3993 }
3994 vport->gidft_inp++;
3995 }
3996
3997 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3998 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3999 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
4000
4001
4002
4003 lpfc_printf_vlog(vport, KERN_ERR,
4004 LOG_TRACE_EVENT,
4005 "0605 %s FC_TYPE %x %s %d\n",
4006 "Failed to issue GID_FT to ",
4007 FC_TYPE_NVME,
4008 "Finishing discovery: gidftinp ",
4009 vport->gidft_inp);
4010 if (vport->gidft_inp == 0)
4011 return 0;
4012 } else
4013 vport->gidft_inp++;
4014 }
4015 return vport->gidft_inp;
4016}
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
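/**
 * lpfc_issue_gidpt - Issue a GID_PT to the fabric name server
 * @vport: pointer to lpfc vport data structure.
 *
 * Issues a GID_PT name-server request for all N_Ports and bumps
 * vport->gidft_inp on success.
 *
 * Return value: 1 if the request was issued, otherwise 0.
 **/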
4028int
4029lpfc_issue_gidpt(struct lpfc_vport *vport)
4030{
4031
4032 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
4033
4034
4035
4036 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4037 "0606 %s Port TYPE %x %s\n",
4038 "Failed to issue GID_PT to ",
4039 GID_PT_N_PORT,
4040 "Finishing discovery.");
4041 return 0;
4042 }
4043 vport->gidft_inp++;
4044 return 1;
4045}
4046
4047
4048
4049
4050
4051
4052
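/**
 * lpfc_mbx_cmpl_ns_reg_login - Completion handler for name server REG_LOGIN
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to mailbox object.
 *
 * On success, registers the port's FC-4 types and symbolic names with the
 * name server (RNN_ID, RSNN_NN, RSPN_ID, RFT_ID, RFF_ID), issues SCR and
 * RDF, and kicks off GID_FT discovery. On failure, falls back to loop
 * discovery or fails the vport.
 **/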
4053void
4054lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4055{
4056 MAILBOX_t *mb = &pmb->u.mb;
4057 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
4058 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4059 struct lpfc_vport *vport = pmb->vport;
4060
4061 pmb->ctx_buf = NULL;
4062 pmb->ctx_ndlp = NULL;
4063 vport->gidft_inp = 0;
4064
4065 if (mb->mbxStatus) {
4066 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4067 "0260 Register NameServer error: 0x%x\n",
4068 mb->mbxStatus);
4069
4070out:
4071
4072
4073
4074 lpfc_nlp_put(ndlp);
4075 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4076 kfree(mp);
4077 mempool_free(pmb, phba->mbox_mem_pool);
4078
4079
4080 lpfc_nlp_not_used(ndlp);
4081
4082 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4083
4084
4085
4086
4087 lpfc_disc_list_loopmap(vport);
4088
4089
4090 lpfc_disc_start(vport);
4091 return;
4092 }
4093 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4094 return;
4095 }
4096
4097 if (phba->sli_rev < LPFC_SLI_REV4)
4098 ndlp->nlp_rpi = mb->un.varWords[0];
4099 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4100 ndlp->nlp_type |= NLP_FABRIC;
4101 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4102 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
4103 "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
4104 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4105 kref_read(&ndlp->kref),
4106 ndlp->nlp_usg_map, ndlp);
4107
4108 if (vport->port_state < LPFC_VPORT_READY) {
4109
4110 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
4111 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
4112 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
4113 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
4114
4115 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4116 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
4117 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
4118
4119 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4120 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
4121 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
4122 FC_TYPE_NVME);
4123
4124
4125 lpfc_issue_els_scr(vport, 0);
4126
4127 lpfc_issue_els_rdf(vport, 0);
4128 }
4129
4130 vport->fc_ns_retry = 0;
4131 if (lpfc_issue_gidft(vport) == 0)
4132 goto out;
4133
4134
4135
4136
4137
4138
4139
4140
4141 lpfc_nlp_put(ndlp);
4142 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4143 kfree(mp);
4144 mempool_free(pmb, phba->mbox_mem_pool);
4145
4146 return;
4147}
4148
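/**
 * lpfc_register_remote_port - Register a node with the FC transport
 * @vport: pointer to lpfc vport data structure.
 * @ndlp: pointer to the node to register.
 *
 * Drops any stale rport binding, then registers the node with the SCSI FC
 * transport via fc_remote_port_add(), fills in frame size, class support
 * and roles, and captures the assigned SCSI target id for FCP targets.
 **/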
4149static void
4150lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4151{
4152 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4153 struct fc_rport *rport;
4154 struct lpfc_rport_data *rdata;
4155 struct fc_rport_identifiers rport_ids;
4156 struct lpfc_hba *phba = vport->phba;
4157
4158 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4159 return;
4160
4161
4162 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
4163 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
4164 rport_ids.port_id = ndlp->nlp_DID;
4165 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4166
4167
4168
4169
4170
4171
4172
4173
4174 rport = ndlp->rport;
4175 if (rport) {
4176 rdata = rport->dd_data;
4177
4178 ndlp->rport = NULL;
4179 if (rdata) {
4180 if (rdata->pnode == ndlp)
4181 lpfc_nlp_put(ndlp);
4182 rdata->pnode = NULL;
4183 }
4184
4185 put_device(&rport->dev);
4186 }
4187
4188 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4189 "rport add: did:x%x flg:x%x type x%x",
4190 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4191
4192
4193 if (vport->load_flag & FC_UNLOADING)
4194 return;
4195
4196 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
4197 if (!rport || !get_device(&rport->dev)) {
4198 dev_printk(KERN_WARNING, &phba->pcidev->dev,
4199 "Warning: fc_remote_port_add failed\n");
4200 return;
4201 }
4202
4203
4204 rport->maxframe_size = ndlp->nlp_maxframe;
4205 rport->supported_classes = ndlp->nlp_class_sup;
4206 rdata = rport->dd_data;
4207 rdata->pnode = lpfc_nlp_get(ndlp);
4208
4209 if (ndlp->nlp_type & NLP_FCP_TARGET)
4210 rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
4211 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
4212 rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
4213 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
4214 rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
4215 if (ndlp->nlp_type & NLP_NVME_TARGET)
4216 rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
4217 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
4218 rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
4219
4220 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
4221 fc_remote_port_rolechg(rport, rport_ids.roles);
4222
4223 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4224 "3183 rport register x%06x, rport x%px role x%x\n",
4225 ndlp->nlp_DID, rport, rport_ids.roles);
4226
4227 if ((rport->scsi_target_id != -1) &&
4228 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
4229 ndlp->nlp_sid = rport->scsi_target_id;
4230 }
4231 return;
4232}
4233
4234static void
4235lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
4236{
4237 struct fc_rport *rport = ndlp->rport;
4238 struct lpfc_vport *vport = ndlp->vport;
4239
4240 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4241 return;
4242
4243 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4244 "rport delete: did:x%x flg:x%x type x%x",
4245 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4246
4247 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4248 "3184 rport unregister x%06x, rport x%px\n",
4249 ndlp->nlp_DID, rport);
4250
4251 fc_remote_port_delete(rport);
4252
4253 return;
4254}
4255
4256static void
4257lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
4258{
4259 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4260 unsigned long iflags;
4261
4262 spin_lock_irqsave(shost->host_lock, iflags);
4263 switch (state) {
4264 case NLP_STE_UNUSED_NODE:
4265 vport->fc_unused_cnt += count;
4266 break;
4267 case NLP_STE_PLOGI_ISSUE:
4268 vport->fc_plogi_cnt += count;
4269 break;
4270 case NLP_STE_ADISC_ISSUE:
4271 vport->fc_adisc_cnt += count;
4272 break;
4273 case NLP_STE_REG_LOGIN_ISSUE:
4274 vport->fc_reglogin_cnt += count;
4275 break;
4276 case NLP_STE_PRLI_ISSUE:
4277 vport->fc_prli_cnt += count;
4278 break;
4279 case NLP_STE_UNMAPPED_NODE:
4280 vport->fc_unmap_cnt += count;
4281 break;
4282 case NLP_STE_MAPPED_NODE:
4283 vport->fc_map_cnt += count;
4284 break;
4285 case NLP_STE_NPR_NODE:
4286 if (vport->fc_npr_cnt == 0 && count == -1)
4287 vport->fc_npr_cnt = 0;
4288 else
4289 vport->fc_npr_cnt += count;
4290 break;
4291 }
4292 spin_unlock_irqrestore(shost->host_lock, iflags);
4293}
4294
4295static void
4296lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4297 int old_state, int new_state)
4298{
4299 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4300
4301 if (new_state == NLP_STE_UNMAPPED_NODE) {
4302 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4303 ndlp->nlp_type |= NLP_FC_NODE;
4304 }
4305 if (new_state == NLP_STE_MAPPED_NODE)
4306 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4307 if (new_state == NLP_STE_NPR_NODE)
4308 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4309
4310
4311 if ((old_state == NLP_STE_MAPPED_NODE ||
4312 old_state == NLP_STE_UNMAPPED_NODE)) {
4313 if (ndlp->rport) {
4314 vport->phba->nport_event_cnt++;
4315 lpfc_unregister_remote_port(ndlp);
4316 }
4317
4318 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4319 vport->phba->nport_event_cnt++;
4320 if (vport->phba->nvmet_support == 0) {
4321
4322 if (ndlp->nlp_type & NLP_NVME_TARGET)
4323 lpfc_nvme_unregister_port(vport, ndlp);
4324 } else {
4325
4326 lpfc_nlp_put(ndlp);
4327 }
4328 }
4329 }
4330
4331
4332
4333 if (new_state == NLP_STE_MAPPED_NODE ||
4334 new_state == NLP_STE_UNMAPPED_NODE) {
4335 if (ndlp->nlp_fc4_type ||
4336 ndlp->nlp_DID == Fabric_DID ||
4337 ndlp->nlp_DID == NameServer_DID ||
4338 ndlp->nlp_DID == FDMI_DID) {
4339 vport->phba->nport_event_cnt++;
4340
4341
4342
4343
4344 lpfc_register_remote_port(vport, ndlp);
4345 }
4346
4347 if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
4348 ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4349 if (vport->phba->nvmet_support == 0) {
4350
4351
4352
4353
4354 if (ndlp->nlp_type & NLP_NVME_TARGET) {
4355 vport->phba->nport_event_cnt++;
4356 lpfc_nvme_register_port(vport, ndlp);
4357 }
4358 } else {
4359
4360
4361
4362 lpfc_nlp_get(ndlp);
4363 }
4364 }
4365 }
4366
4367 if ((new_state == NLP_STE_MAPPED_NODE) &&
4368 (vport->stat_data_enabled)) {
4369
4370
4371
4372
4373 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
4374 sizeof(struct lpfc_scsicmd_bkt),
4375 GFP_KERNEL);
4376
4377 if (!ndlp->lat_data)
4378 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4379 "0286 lpfc_nlp_state_cleanup failed to "
4380 "allocate statistical data buffer DID "
4381 "0x%x\n", ndlp->nlp_DID);
4382 }
4388
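	/*
	 * If the node just went MAPPED as an FCP target but rport
	 * registration failed or assigned a target id outside the
	 * presentable range, move it back to the unmapped list.
	 */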
4389 if ((new_state == NLP_STE_MAPPED_NODE) &&
4390 (ndlp->nlp_type & NLP_FCP_TARGET) &&
4391 (!ndlp->rport ||
4392 ndlp->rport->scsi_target_id == -1 ||
4393 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4394 spin_lock_irq(shost->host_lock);
4395 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
4396 spin_unlock_irq(shost->host_lock);
4397 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4398 }
4399}
4400
4401static char *
4402lpfc_nlp_state_name(char *buffer, size_t size, int state)
4403{
4404 static char *states[] = {
4405 [NLP_STE_UNUSED_NODE] = "UNUSED",
4406 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
4407 [NLP_STE_ADISC_ISSUE] = "ADISC",
4408 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
4409 [NLP_STE_PRLI_ISSUE] = "PRLI",
4410 [NLP_STE_LOGO_ISSUE] = "LOGO",
4411 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
4412 [NLP_STE_MAPPED_NODE] = "MAPPED",
4413 [NLP_STE_NPR_NODE] = "NPR",
4414 };
4415
4416 if (state < NLP_STE_MAX_STATE && states[state])
4417 strlcpy(buffer, states[state], size);
4418 else
4419 snprintf(buffer, size, "unknown (%d)", state);
4420 return buffer;
4421}
4422
4423void
4424lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4425 int state)
4426{
4427 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4428 int old_state = ndlp->nlp_state;
4429 char name1[16], name2[16];
4430
4431 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4432 "0904 NPort state transition x%06x, %s -> %s\n",
4433 ndlp->nlp_DID,
4434 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4435 lpfc_nlp_state_name(name2, sizeof(name2), state));
4436
4437 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4438 "node statechg did:x%x old:%d ste:%d",
4439 ndlp->nlp_DID, old_state, state);
4440
4441 if (old_state == NLP_STE_NPR_NODE &&
4442 state != NLP_STE_NPR_NODE)
4443 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4444 if (old_state == NLP_STE_UNMAPPED_NODE) {
4445 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4446 ndlp->nlp_type &= ~NLP_FC_NODE;
4447 }
4448
4449 if (list_empty(&ndlp->nlp_listp)) {
4450 spin_lock_irq(shost->host_lock);
4451 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4452 spin_unlock_irq(shost->host_lock);
4453 } else if (old_state)
4454 lpfc_nlp_counters(vport, old_state, -1);
4455
4456 ndlp->nlp_state = state;
4457 lpfc_nlp_counters(vport, state, 1);
4458 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4459}
4460
4461void
4462lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4463{
4464 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4465
4466 if (list_empty(&ndlp->nlp_listp)) {
4467 spin_lock_irq(shost->host_lock);
4468 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4469 spin_unlock_irq(shost->host_lock);
4470 }
4471}
4472
4473void
4474lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4475{
4476 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4477
4478 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4479 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4480 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4481 spin_lock_irq(shost->host_lock);
4482 list_del_init(&ndlp->nlp_listp);
4483 spin_unlock_irq(shost->host_lock);
4484 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4485 NLP_STE_UNUSED_NODE);
4486}
4487
4488static void
4489lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4490{
4491 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4492 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4493 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4494 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4495 NLP_STE_UNUSED_NODE);
4496}
4510
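/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * Initializes every field of the node object: the event list heads and
 * delay timer, the back-pointers to @vport and its hba, the kref and
 * the default SID, FC4 type and queue-depth values.
 */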
4511static inline void
4512lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4513 uint32_t did)
4514{
4515 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4516 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4517 timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
4518 ndlp->nlp_DID = did;
4519 ndlp->vport = vport;
4520 ndlp->phba = vport->phba;
4521 ndlp->nlp_sid = NLP_NO_SID;
4522 ndlp->nlp_fc4_type = NLP_FC4_NONE;
4523 kref_init(&ndlp->kref);
4524 NLP_INT_NODE_ACT(ndlp);
4525 atomic_set(&ndlp->cmd_pending, 0);
4526 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4527 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4528}
4529
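/*
 * Bring an inactive node back into use: reallocate its RPI on SLI4 if
 * needed, re-initialize the structure while preserving the DID, any
 * deferred-unreg state and the RRQ bitmap, then move it to @state.
 */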
4530struct lpfc_nodelist *
4531lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4532 int state)
4533{
4534 struct lpfc_hba *phba = vport->phba;
4535 uint32_t did, flag;
4536 unsigned long flags;
4537 unsigned long *active_rrqs_xri_bitmap = NULL;
4538 int rpi = LPFC_RPI_ALLOC_ERROR;
4539 uint32_t defer_did = 0;
4540
4541 if (!ndlp)
4542 return NULL;
4543
4544 if (phba->sli_rev == LPFC_SLI_REV4) {
4545 if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
4546 rpi = lpfc_sli4_alloc_rpi(vport->phba);
4547 else
4548 rpi = ndlp->nlp_rpi;
4549
4550 if (rpi == LPFC_RPI_ALLOC_ERROR) {
4551 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4552 "0359 %s: ndlp:x%px "
4553 "usgmap:x%x refcnt:%d FAILED RPI "
4554 " ALLOC\n",
4555 __func__,
4556 (void *)ndlp, ndlp->nlp_usg_map,
4557 kref_read(&ndlp->kref));
4558 return NULL;
4559 }
4560 }
4561
4562 spin_lock_irqsave(&phba->ndlp_lock, flags);
4563
4564 if (NLP_CHK_FREE_REQ(ndlp)) {
4565 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4566 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4567 "0277 %s: ndlp:x%px "
4568 "usgmap:x%x refcnt:%d\n",
4569 __func__, (void *)ndlp, ndlp->nlp_usg_map,
4570 kref_read(&ndlp->kref));
4571 goto free_rpi;
4572 }
4573
4574 if (NLP_CHK_NODE_ACT(ndlp)) {
4575 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4576 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4577 "0278 %s: ndlp:x%px "
4578 "usgmap:x%x refcnt:%d\n",
4579 __func__, (void *)ndlp, ndlp->nlp_usg_map,
4580 kref_read(&ndlp->kref));
4581 goto free_rpi;
4582 }
4583
4584
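	/* Preserve state that must survive the re-initialization below */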
4585 did = ndlp->nlp_DID;
4586 flag = (ndlp->nlp_flag & NLP_UNREG_INP);
4587 if (flag & NLP_UNREG_INP)
4588 defer_did = ndlp->nlp_defer_did;
4589 if (phba->sli_rev == LPFC_SLI_REV4)
4590 active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
4591
4592
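	/* Zero the node, keeping only the embedded list head */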
	memset((((char *)ndlp) + sizeof(struct list_head)), 0,
	       sizeof(struct lpfc_nodelist) - sizeof(struct list_head));
4595
4596
4597 lpfc_initialize_node(vport, ndlp, did);
4598 ndlp->nlp_flag |= flag;
4599 if (flag & NLP_UNREG_INP)
4600 ndlp->nlp_defer_did = defer_did;
4601 if (phba->sli_rev == LPFC_SLI_REV4)
4602 ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
4603
4604 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4605 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4606 ndlp->nlp_rpi = rpi;
4607 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4608 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
4609 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
4610 ndlp->nlp_flag,
4611 kref_read(&ndlp->kref),
4612 ndlp->nlp_usg_map, ndlp);
4613 }
4614
4615
4616 if (state != NLP_STE_UNUSED_NODE)
4617 lpfc_nlp_set_state(vport, ndlp, state);
4618 else
4619 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4620 "0013 rpi:%x DID:%x flg:%x refcnt:%d "
4621 "map:%x x%px STATE=UNUSED\n",
4622 ndlp->nlp_rpi, ndlp->nlp_DID,
4623 ndlp->nlp_flag,
4624 kref_read(&ndlp->kref),
4625 ndlp->nlp_usg_map, ndlp);
4626
4627 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4628 "node enable: did:x%x",
4629 ndlp->nlp_DID, 0, 0);
4630 return ndlp;
4631
4632free_rpi:
4633 if (phba->sli_rev == LPFC_SLI_REV4) {
4634 lpfc_sli4_free_rpi(vport->phba, rpi);
4635 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
4636 }
4637 return NULL;
4638}
4639
4640void
4641lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4642{
4649
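	/*
	 * Park the node in UNUSED state and drop the reference that
	 * keeps it on the vport list; if it is already UNUSED, another
	 * thread has done this work.
	 */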
4650 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4651 return;
4652 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4653 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4654 lpfc_cleanup_vports_rrqs(vport, ndlp);
4655 lpfc_unreg_rpi(vport, ndlp);
4656 }
4657
4658 lpfc_nlp_put(ndlp);
4659 return;
4660}
4664
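/*
 * Start or restart the discovery timer and set FC_DISC_TMO.
 */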
4665void
4666lpfc_set_disctmo(struct lpfc_vport *vport)
4667{
4668 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4669 struct lpfc_hba *phba = vport->phba;
4670 uint32_t tmo;
4671
4672 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
4673
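		/* For FAN, the timeout should exceed E_D_TOV */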
4674 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
4675 } else {
4678
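		/* Normal discovery timeout should exceed the ELS/CT
		 * timeout; allow 3 * R_A_TOV for CT requests.
		 */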
4679 tmo = ((phba->fc_ratov * 3) + 3);
4680 }
4681
4682
4683 if (!timer_pending(&vport->fc_disctmo)) {
4684 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4685 "set disc timer: tmo:x%x state:x%x flg:x%x",
4686 tmo, vport->port_state, vport->fc_flag);
4687 }
4688
4689 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
4690 spin_lock_irq(shost->host_lock);
4691 vport->fc_flag |= FC_DISC_TMO;
4692 spin_unlock_irq(shost->host_lock);
4693
4694
4695 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4696 "0247 Start Discovery Timer state x%x "
4697 "Data: x%x x%lx x%x x%x\n",
4698 vport->port_state, tmo,
4699 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
4700 vport->fc_adisc_cnt);
4701
4702 return;
4703}
4707
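/*
 * Cancel a pending discovery timeout, clearing FC_DISC_TMO and the
 * worker event.
 */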
4708int
4709lpfc_can_disctmo(struct lpfc_vport *vport)
4710{
4711 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4712 unsigned long iflags;
4713
4714 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4715 "can disc timer: state:x%x rtry:x%x flg:x%x",
4716 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
4717
4718
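	/* Turn off the discovery timer if it is running */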
4719 if (vport->fc_flag & FC_DISC_TMO) {
4720 spin_lock_irqsave(shost->host_lock, iflags);
4721 vport->fc_flag &= ~FC_DISC_TMO;
4722 spin_unlock_irqrestore(shost->host_lock, iflags);
4723 del_timer_sync(&vport->fc_disctmo);
4724 spin_lock_irqsave(&vport->work_port_lock, iflags);
4725 vport->work_port_events &= ~WORKER_DISC_TMO;
4726 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
4727 }
4728
4729
4730 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4731 "0248 Cancel Discovery Timer state x%x "
4732 "Data: x%x x%x x%x\n",
4733 vport->port_state, vport->fc_flag,
4734 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
4735 return 0;
4736}
4741
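/*
 * Decide whether an iocb on @pring is associated with @ndlp so that it
 * can be flushed when the node goes away.
 */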
4742int
4743lpfc_check_sli_ndlp(struct lpfc_hba *phba,
4744 struct lpfc_sli_ring *pring,
4745 struct lpfc_iocbq *iocb,
4746 struct lpfc_nodelist *ndlp)
4747{
4748 IOCB_t *icmd = &iocb->iocb;
4749 struct lpfc_vport *vport = ndlp->vport;
4750
4751 if (iocb->vport != vport)
4752 return 0;
4753
4754 if (pring->ringno == LPFC_ELS_RING) {
4755 switch (icmd->ulpCommand) {
4756 case CMD_GEN_REQUEST64_CR:
4757 if (iocb->context_un.ndlp == ndlp)
4758 return 1;
4759 fallthrough;
4760 case CMD_ELS_REQUEST64_CR:
4761 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
4762 return 1;
4763 fallthrough;
4764 case CMD_XMIT_ELS_RSP64_CX:
4765 if (iocb->context1 == (uint8_t *) ndlp)
4766 return 1;
4767 }
4768 } else if (pring->ringno == LPFC_FCP_RING) {
4769
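		/* Skip the match check while waiting to relogin to an FCP target */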
4770 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4771 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
4772 return 0;
4773 }
4774 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
4775 return 1;
4776 }
4777 }
4778 return 0;
4779}
4780
4781static void
4782__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
4783 struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
4784 struct list_head *dequeue_list)
4785{
4786 struct lpfc_iocbq *iocb, *next_iocb;
4787
4788 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
			list_move_tail(&iocb->list, dequeue_list);
4793 }
4794}
4795
4796static void
4797lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
4798 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4799{
4800 struct lpfc_sli *psli = &phba->sli;
4801 uint32_t i;
4802
4803 spin_lock_irq(&phba->hbalock);
4804 for (i = 0; i < psli->num_rings; i++)
4805 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
4806 dequeue_list);
4807 spin_unlock_irq(&phba->hbalock);
4808}
4809
4810static void
4811lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
4812 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4813{
4814 struct lpfc_sli_ring *pring;
4815 struct lpfc_queue *qp = NULL;
4816
4817 spin_lock_irq(&phba->hbalock);
4818 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
4819 pring = qp->pring;
4820 if (!pring)
4821 continue;
4822 spin_lock(&pring->ring_lock);
4823 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
4824 spin_unlock(&pring->ring_lock);
4825 }
4826 spin_unlock_irq(&phba->hbalock);
4827}
4832
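/*
 * Flush queued transmit iocbs destined for an nport that has no RPI,
 * completing them with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.
 */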
4833static int
4834lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4835{
4836 LIST_HEAD(completions);
4837
4838 lpfc_fabric_abort_nport(ndlp);
4843
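	/*
	 * Collect every transmit-queue iocb that matches the nport so
	 * it can be cancelled below.
	 */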
4844 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4845 if (phba->sli_rev != LPFC_SLI_REV4)
4846 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
4847 else
4848 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
4849 }
4850
4851
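	/* Cancel all the IOCBs from the completions list */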
4852 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4853 IOERR_SLI_ABORTED);
4854
4855 return 0;
4856}
4865
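/*
 * Completion handler for an UNREG_LOGIN issued with a LOGO pending:
 * send the LOGO, then either restart a deferred PLOGI or release the
 * RPI.
 */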
4866static void
4867lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4868{
4869 struct lpfc_vport *vport = pmb->vport;
4870 struct lpfc_nodelist *ndlp;
4871
4872 ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
4873 if (!ndlp)
4874 return;
4875 lpfc_issue_els_logo(vport, ndlp, 0);
4876 mempool_free(pmb, phba->mbox_mem_pool);
4877
4878
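	/* Check whether a deferred PLOGI is waiting on this unreg */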
4879 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
4880 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
4881 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4882 "1434 UNREG cmpl deferred logo x%x "
4883 "on NPort x%x Data: x%x x%px\n",
4884 ndlp->nlp_rpi, ndlp->nlp_DID,
4885 ndlp->nlp_defer_did, ndlp);
4886
4887 ndlp->nlp_flag &= ~NLP_UNREG_INP;
4888 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4889 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4890 } else {
4891 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
4892 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
4893 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
4894 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
4895 }
4896 ndlp->nlp_flag &= ~NLP_UNREG_INP;
4897 }
4898}
4904
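/*
 * Select the completion handler for an UNREG_LOGIN mailbox: LOGO
 * pending, normal SLI4 unreg, or default handling (including unload).
 */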
4905static void
4906lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
4907 struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
4908{
4909 unsigned long iflags;
4910
4911 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
4912 mbox->ctx_ndlp = ndlp;
4913 mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
4914
4915 } else if (phba->sli_rev == LPFC_SLI_REV4 &&
4916 (!(vport->load_flag & FC_UNLOADING)) &&
4917 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
4918 LPFC_SLI_INTF_IF_TYPE_2) &&
4919 (kref_read(&ndlp->kref) > 0)) {
4920 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
4921 mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
4922 } else {
4923 if (vport->load_flag & FC_UNLOADING) {
4924 if (phba->sli_rev == LPFC_SLI_REV4) {
4925 spin_lock_irqsave(&vport->phba->ndlp_lock,
4926 iflags);
4927 ndlp->nlp_flag |= NLP_RELEASE_RPI;
4928 spin_unlock_irqrestore(&vport->phba->ndlp_lock,
4929 iflags);
4930 }
4931 lpfc_nlp_get(ndlp);
4932 }
4933 mbox->ctx_ndlp = ndlp;
4934 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4935 }
4936}
4946
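/*
 * Unregister the RPI bound to @ndlp if one is registered or a
 * REG_LOGIN is outstanding.  Returns 1 when an unreg was needed,
 * 0 otherwise.
 */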
4947int
4948lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4949{
4950 struct lpfc_hba *phba = vport->phba;
4951 LPFC_MBOXQ_t *mbox;
4952 int rc, acc_plogi = 1;
4953 uint16_t rpi;
4954
4955 if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4956 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
4957 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
4958 lpfc_printf_vlog(vport, KERN_INFO,
4959 LOG_NODE | LOG_DISCOVERY,
4960 "3366 RPI x%x needs to be "
4961 "unregistered nlp_flag x%x "
4962 "did x%x\n",
4963 ndlp->nlp_rpi, ndlp->nlp_flag,
4964 ndlp->nlp_DID);
4968
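		/* If an UNREG is already in progress for this ndlp,
		 * don't queue another one.
		 */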
4969 if (ndlp->nlp_flag & NLP_UNREG_INP) {
4970 lpfc_printf_vlog(vport, KERN_INFO,
4971 LOG_NODE | LOG_DISCOVERY,
4972 "1436 unreg_rpi SKIP UNREG x%x on "
4973 "NPort x%x deferred x%x flg x%x "
4974 "Data: x%px\n",
4975 ndlp->nlp_rpi, ndlp->nlp_DID,
4976 ndlp->nlp_defer_did,
4977 ndlp->nlp_flag, ndlp);
4978 goto out;
4979 }
4980
4981 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4982 if (mbox) {
4983
4984 rpi = ndlp->nlp_rpi;
4985 if (phba->sli_rev == LPFC_SLI_REV4)
4986 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4987
4988 lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
4989 mbox->vport = vport;
4990 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
4991 if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
4994
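				/* Leave NLP_LOGO_ACC set until the
				 * unreg RPI completes.
				 */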
4995 acc_plogi = 0;
4996 if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
4997 Fabric_DID_MASK) &&
4998 (!(vport->fc_flag & FC_OFFLINE_MODE)))
4999 ndlp->nlp_flag |= NLP_UNREG_INP;
5000
5001 lpfc_printf_vlog(vport, KERN_INFO,
5002 LOG_NODE | LOG_DISCOVERY,
5003 "1433 unreg_rpi UNREG x%x on "
5004 "NPort x%x deferred flg x%x "
5005 "Data:x%px\n",
5006 ndlp->nlp_rpi, ndlp->nlp_DID,
5007 ndlp->nlp_flag, ndlp);
5008
5009 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5010 if (rc == MBX_NOT_FINISHED) {
5011 mempool_free(mbox, phba->mbox_mem_pool);
5012 acc_plogi = 1;
5013 }
5014 }
5015 lpfc_no_rpi(phba, ndlp);
5016out:
5017 if (phba->sli_rev != LPFC_SLI_REV4)
5018 ndlp->nlp_rpi = 0;
5019 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
5020 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5021 if (acc_plogi)
5022 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5023 return 1;
5024 }
5025 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5026 return 0;
5027}
5035
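/*
 * Unregister every registered RPI on every vport of the HBA.
 */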
5036void
5037lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
5038{
5039 struct lpfc_vport **vports;
5040 struct lpfc_nodelist *ndlp;
5041 struct Scsi_Host *shost;
5042 int i;
5043
5044 vports = lpfc_create_vport_work_array(phba);
5045 if (!vports) {
5046 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5047 "2884 Vport array allocation failed \n");
5048 return;
5049 }
5050 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5051 shost = lpfc_shost_from_vport(vports[i]);
5052 spin_lock_irq(shost->host_lock);
5053 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5054 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
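				/* lpfc_unreg_rpi can block; drop
				 * host_lock across the call.
				 */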
5056 spin_unlock_irq(shost->host_lock);
5057 lpfc_unreg_rpi(vports[i], ndlp);
5058 spin_lock_irq(shost->host_lock);
5059 }
5060 }
5061 spin_unlock_irq(shost->host_lock);
5062 }
5063 lpfc_destroy_vport_work_array(phba, vports);
5064}
5065
5066void
5067lpfc_unreg_all_rpis(struct lpfc_vport *vport)
5068{
5069 struct lpfc_hba *phba = vport->phba;
5070 LPFC_MBOXQ_t *mbox;
5071 int rc;
5072
5073 if (phba->sli_rev == LPFC_SLI_REV4) {
5074 lpfc_sli4_unreg_all_rpis(vport);
5075 return;
5076 }
5077
5078 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5079 if (mbox) {
5080 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
5081 mbox);
5082 mbox->vport = vport;
5083 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5084 mbox->ctx_ndlp = NULL;
5085 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
5086 if (rc != MBX_TIMEOUT)
5087 mempool_free(mbox, phba->mbox_mem_pool);
5088
5089 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5090 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5091 "1836 Could not issue "
5092 "unreg_login(all_rpis) status %d\n",
5093 rc);
5094 }
5095}
5096
5097void
5098lpfc_unreg_default_rpis(struct lpfc_vport *vport)
5099{
5100 struct lpfc_hba *phba = vport->phba;
5101 LPFC_MBOXQ_t *mbox;
5102 int rc;
5103
5104
5105 if (phba->sli_rev > LPFC_SLI_REV3)
5106 return;
5107
5108 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5109 if (mbox) {
5110 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
5111 mbox);
5112 mbox->vport = vport;
5113 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5114 mbox->ctx_ndlp = NULL;
5115 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
5116 if (rc != MBX_TIMEOUT)
5117 mempool_free(mbox, phba->mbox_mem_pool);
5118
5119 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5120 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5121 "1815 Could not issue "
5122 "unreg_did (default rpis) status %d\n",
5123 rc);
5124 }
5125}
5130
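/*
 * Tear down everything attached to @ndlp: dequeue or disable it,
 * detach it from REG_LOGIN mailboxes, abort its ELS traffic and
 * unregister its RPI.
 */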
5131static int
5132lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5133{
5134 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5135 struct lpfc_hba *phba = vport->phba;
5136 LPFC_MBOXQ_t *mb, *nextmb;
5137 struct lpfc_dmabuf *mp;
5138 unsigned long iflags;
5139
5140
5141 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5142 "0900 Cleanup node for NPort x%x "
5143 "Data: x%x x%x x%x\n",
5144 ndlp->nlp_DID, ndlp->nlp_flag,
5145 ndlp->nlp_state, ndlp->nlp_rpi);
5146 if (NLP_CHK_FREE_REQ(ndlp)) {
5147 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5148 "0280 %s: ndlp:x%px "
5149 "usgmap:x%x refcnt:%d\n",
5150 __func__, (void *)ndlp, ndlp->nlp_usg_map,
5151 kref_read(&ndlp->kref));
5152 lpfc_dequeue_node(vport, ndlp);
5153 } else {
5154 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5155 "0281 %s: ndlp:x%px "
5156 "usgmap:x%x refcnt:%d\n",
5157 __func__, (void *)ndlp, ndlp->nlp_usg_map,
5158 kref_read(&ndlp->kref));
5159 lpfc_disable_node(vport, ndlp);
5160 }
5165
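	/* Detach this node from any REG_LOGIN mailbox that references it */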
	mb = phba->sli.mbox_active;
	if (mb) {
5167 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
5168 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
5169 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5170 mb->ctx_ndlp = NULL;
5171 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5172 }
5173 }
5174
5175 spin_lock_irq(&phba->hbalock);
5176
5177 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
5178 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
5179 (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
5180 (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
5181 continue;
5182
5183 mb->ctx_ndlp = NULL;
5184 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5185 }
5186
5187 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
5188 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
5189 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
5190 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5191 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
5192 if (mp) {
5193 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
5194 kfree(mp);
5195 }
5196 list_del(&mb->list);
5197 mempool_free(mb, phba->mbox_mem_pool);
5202 }
5203 }
5204 spin_unlock_irq(&phba->hbalock);
5205
5206 lpfc_els_abort(phba, ndlp);
5207
5208 spin_lock_irq(shost->host_lock);
5209 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
5210 spin_unlock_irq(shost->host_lock);
5211
5212 ndlp->nlp_last_elscmd = 0;
5213 del_timer_sync(&ndlp->nlp_delayfunc);
5214
5215 list_del_init(&ndlp->els_retry_evt.evt_listp);
5216 list_del_init(&ndlp->dev_loss_evt.evt_listp);
5217 lpfc_cleanup_vports_rrqs(vport, ndlp);
5218 if (phba->sli_rev == LPFC_SLI_REV4)
5219 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5220 if (!lpfc_unreg_rpi(vport, ndlp)) {
5221
5222 if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
5223 !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
5224 lpfc_sli4_free_rpi(vport->phba,
5225 ndlp->nlp_rpi);
5226 spin_lock_irqsave(&vport->phba->ndlp_lock,
5227 iflags);
5228 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
5229 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
5230 spin_unlock_irqrestore(&vport->phba->ndlp_lock,
5231 iflags);
5232 }
5233 }
5234 return 0;
5235}
5241
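/*
 * Final teardown of @ndlp once its last reference is dropped: clean up
 * any default RPI and break the node/rport linkage.
 */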
5242static void
5243lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5244{
5245 struct lpfc_hba *phba = vport->phba;
5246 struct lpfc_rport_data *rdata;
5247 struct fc_rport *rport;
5248 LPFC_MBOXQ_t *mbox;
5249 int rc;
5250
5251 lpfc_cancel_retry_delay_tmo(vport, ndlp);
5252 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
5253 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
5254 !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
5255 phba->sli_rev != LPFC_SLI_REV4) {
5258
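		/* For this case we need to clean up the default RPI
		 * allocated by the firmware.
		 */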
5259 lpfc_printf_vlog(vport, KERN_INFO,
5260 LOG_NODE | LOG_DISCOVERY,
5261 "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
5262 "ref %d map:x%x ndlp x%px\n",
5263 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5264 kref_read(&ndlp->kref),
5265 ndlp->nlp_usg_map, ndlp);
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
					  (uint8_t *)&vport->fc_sparam,
					  mbox, ndlp->nlp_rpi);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			} else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->ctx_ndlp = ndlp;
				rc = lpfc_sli_issue_mbox(phba, mbox,
							 MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED)
					mempool_free(mbox,
						     phba->mbox_mem_pool);
			}
		}
5284 }
5285 lpfc_cleanup_node(vport, ndlp);
5291
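	/*
	 * The node/rport linkage should already be broken; if an
	 * fc_rport is still attached, detach it and drop its device
	 * reference.
	 */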
5292 if (ndlp->rport) {
5297 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
5298 "0940 removed node x%px DID x%x "
5299 "rpi %d rport not null x%px\n",
5300 ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
5301 ndlp->rport);
5302 rport = ndlp->rport;
5303 rdata = rport->dd_data;
5304 rdata->pnode = NULL;
5305 ndlp->rport = NULL;
5306 put_device(&rport->dev);
5307 }
5308}
5309
5310static int
5311lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5312 uint32_t did)
5313{
5314 D_ID mydid, ndlpdid, matchdid;
5315
5316 if (did == Bcast_DID)
5317 return 0;
5318
5319
5320 if (ndlp->nlp_DID == did)
5321 return 1;
5322
5323
5324 mydid.un.word = vport->fc_myDID;
5325 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
5326 return 0;
5327 }
5328
5329 matchdid.un.word = did;
5330 ndlpdid.un.word = ndlp->nlp_DID;
5331 if (matchdid.un.b.id == ndlpdid.un.b.id) {
5332 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
5333 (mydid.un.b.area == matchdid.un.b.area)) {
5334
5335
5336
5337
5338
5339
5340
5341
5342 if ((ndlpdid.un.b.domain == 0) &&
5343 (ndlpdid.un.b.area == 0)) {
5344 if (ndlpdid.un.b.id &&
5345 vport->phba->fc_topology ==
5346 LPFC_TOPOLOGY_LOOP)
5347 return 1;
5348 }
5349 return 0;
5350 }
5351
5352 matchdid.un.word = ndlp->nlp_DID;
5353 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
5354 (mydid.un.b.area == ndlpdid.un.b.area)) {
5355 if ((matchdid.un.b.domain == 0) &&
5356 (matchdid.un.b.area == 0)) {
5357 if (matchdid.un.b.id)
5358 return 1;
5359 }
5360 }
5361 }
5362 return 0;
5363}
5364
5365
5366static struct lpfc_nodelist *
5367__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5368{
5369 struct lpfc_nodelist *ndlp;
5370 uint32_t data1;
5371
5372 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5373 if (lpfc_matchdid(vport, ndlp, did)) {
5374 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5375 ((uint32_t)ndlp->nlp_xri << 16) |
5376 ((uint32_t)ndlp->nlp_type << 8) |
5377 ((uint32_t)ndlp->nlp_usg_map & 0xff));
5378 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5379 "0929 FIND node DID "
5380 "Data: x%px x%x x%x x%x x%x x%px\n",
5381 ndlp, ndlp->nlp_DID,
5382 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
5383 ndlp->active_rrqs_xri_bitmap);
5384 return ndlp;
5385 }
5386 }
5387
5388
5389 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5390 "0932 FIND node did x%x NOT FOUND.\n", did);
5391 return NULL;
5392}
5393
5394struct lpfc_nodelist *
5395lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5396{
5397 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5398 struct lpfc_nodelist *ndlp;
5399 unsigned long iflags;
5400
5401 spin_lock_irqsave(shost->host_lock, iflags);
5402 ndlp = __lpfc_findnode_did(vport, did);
5403 spin_unlock_irqrestore(shost->host_lock, iflags);
5404 return ndlp;
5405}
5406
5407struct lpfc_nodelist *
5408lpfc_findnode_mapped(struct lpfc_vport *vport)
5409{
5410 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5411 struct lpfc_nodelist *ndlp;
5412 uint32_t data1;
5413 unsigned long iflags;
5414
5415 spin_lock_irqsave(shost->host_lock, iflags);
5416
5417 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5418 if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
5419 ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
5420 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5421 ((uint32_t)ndlp->nlp_xri << 16) |
5422 ((uint32_t)ndlp->nlp_type << 8) |
5423 ((uint32_t)ndlp->nlp_rpi & 0xff));
5424 spin_unlock_irqrestore(shost->host_lock, iflags);
5425 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5426 "2025 FIND node DID "
5427 "Data: x%px x%x x%x x%x x%px\n",
5428 ndlp, ndlp->nlp_DID,
5429 ndlp->nlp_flag, data1,
5430 ndlp->active_rrqs_xri_bitmap);
5431 return ndlp;
5432 }
5433 }
5434 spin_unlock_irqrestore(shost->host_lock, iflags);
5435
5436
5437 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5438 "2026 FIND mapped did NOT FOUND.\n");
5439 return NULL;
5440}
5441
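/*
 * Find or create the node for @did and mark it for discovery
 * (NLP_NPR_2B_DISC), honoring RSCN filtering and NVMET constraints.
 */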
5442struct lpfc_nodelist *
5443lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
5444{
5445 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5446 struct lpfc_nodelist *ndlp;
5447
5448 ndlp = lpfc_findnode_did(vport, did);
5449 if (!ndlp) {
5450 if (vport->phba->nvmet_support)
5451 return NULL;
5452 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
5453 lpfc_rscn_payload_check(vport, did) == 0)
5454 return NULL;
5455 ndlp = lpfc_nlp_init(vport, did);
5456 if (!ndlp)
5457 return NULL;
5458 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5459
5460 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5461 "6453 Setup New Node 2B_DISC x%x "
5462 "Data:x%x x%x x%x\n",
5463 ndlp->nlp_DID, ndlp->nlp_flag,
5464 ndlp->nlp_state, vport->fc_flag);
5465
5466 spin_lock_irq(shost->host_lock);
5467 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5468 spin_unlock_irq(shost->host_lock);
5469 return ndlp;
5470 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
5471 if (vport->phba->nvmet_support)
5472 return NULL;
5473 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
5474 if (!ndlp) {
5475 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
5476 "0014 Could not enable ndlp\n");
5477 return NULL;
5478 }
5479 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5480 "6454 Setup Enabled Node 2B_DISC x%x "
5481 "Data:x%x x%x x%x\n",
5482 ndlp->nlp_DID, ndlp->nlp_flag,
5483 ndlp->nlp_state, vport->fc_flag);
5484
5485 spin_lock_irq(shost->host_lock);
5486 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5487 spin_unlock_irq(shost->host_lock);
5488 return ndlp;
5489 }
5490
5491
5492
5493
5494
5495 if ((vport->fc_flag & FC_RSCN_MODE) &&
5496 !(vport->fc_flag & FC_NDISC_ACTIVE)) {
5497 if (lpfc_rscn_payload_check(vport, did)) {
5498
5499
5500
5501
5502 lpfc_cancel_retry_delay_tmo(vport, ndlp);
5503
5504 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5505 "6455 Setup RSCN Node 2B_DISC x%x "
5506 "Data:x%x x%x x%x\n",
5507 ndlp->nlp_DID, ndlp->nlp_flag,
5508 ndlp->nlp_state, vport->fc_flag);
5509
5510
5511
5512
5513
5514
5515 if (vport->phba->nvmet_support)
5516 return ndlp;
5520
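			/* If we've already received a PLOGI from this
			 * NPort, skip it unless it is a target.
			 */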
5521 if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
5522 !(ndlp->nlp_type &
5523 (NLP_FCP_TARGET | NLP_NVME_TARGET)))
5524 return NULL;
5525
5526 ndlp->nlp_prev_state = ndlp->nlp_state;
5527 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5528
5529 spin_lock_irq(shost->host_lock);
5530 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5531 spin_unlock_irq(shost->host_lock);
5532 } else {
5533 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5534 "6456 Skip Setup RSCN Node x%x "
5535 "Data:x%x x%x x%x\n",
5536 ndlp->nlp_DID, ndlp->nlp_flag,
5537 ndlp->nlp_state, vport->fc_flag);
5538 ndlp = NULL;
5539 }
5540 } else {
5541 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5542 "6457 Setup Active Node 2B_DISC x%x "
5543 "Data:x%x x%x x%x\n",
5544 ndlp->nlp_DID, ndlp->nlp_flag,
5545 ndlp->nlp_state, vport->fc_flag);
5550
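		/* Skip the node if discovery is already in progress on
		 * it or a PLOGI has been received (initiator case).
		 */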
5551 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
5552 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5553 (!vport->phba->nvmet_support &&
5554 ndlp->nlp_flag & NLP_RCV_PLOGI))
5555 return NULL;
5556
5557 if (vport->phba->nvmet_support)
5558 return ndlp;
5562
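		/* Moving to NPR clears unsolicited flags and allows
		 * rediscovery.
		 */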
5563 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5564
5565 spin_lock_irq(shost->host_lock);
5566 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5567 spin_unlock_irq(shost->host_lock);
5568 }
5569 return ndlp;
5570}
5571
5572
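/*
 * Build a list of nodes to discover on a loop, one per candidate ALPA.
 */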
5573void
5574lpfc_disc_list_loopmap(struct lpfc_vport *vport)
5575{
5576 struct lpfc_hba *phba = vport->phba;
5577 int j;
5578 uint32_t alpa, index;
5579
5580 if (!lpfc_is_link_up(phba))
5581 return;
5582
5583 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5584 return;
5585
5586
5587 if (phba->alpa_map[0]) {
5588 for (j = 1; j <= phba->alpa_map[0]; j++) {
5589 alpa = phba->alpa_map[j];
5590 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
5591 continue;
5592 lpfc_setup_disc_node(vport, alpa);
5593 }
5594 } else {
5595
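		/* No loop map is available; try every ALPA */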
5596 for (j = 0; j < FC_MAXLOOP; j++) {
5599
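			/* With cfg_scan_down set, walk lpfcAlpaArray
			 * from the start (ALPA 0xEF); otherwise walk
			 * it in reverse.
			 */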
5600 if (vport->cfg_scan_down)
5601 index = j;
5602 else
5603 index = FC_MAXLOOP - j - 1;
5604 alpa = lpfcAlpaArray[index];
5605 if ((vport->fc_myDID & 0xff) == alpa)
5606 continue;
5607 lpfc_setup_disc_node(vport, alpa);
5608 }
5609 }
5610 return;
5611}
5612
5613
5614void
5615lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
5616{
5617 LPFC_MBOXQ_t *mbox;
5618 struct lpfc_sli *psli = &phba->sli;
5619 struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
5620 struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING];
5621 int rc;
5626
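	/*
	 * Skip if a CLEAR_LA was already issued, this is not the
	 * physical port, or the adapter is SLI4 (no CLEAR_LA needed).
	 */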
5627 if ((phba->link_state >= LPFC_CLEAR_LA) ||
5628 (vport->port_type != LPFC_PHYSICAL_PORT) ||
5629 (phba->sli_rev == LPFC_SLI_REV4))
5630 return;
5631
5632
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
5634 phba->link_state = LPFC_CLEAR_LA;
5635 lpfc_clear_la(phba, mbox);
5636 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
5637 mbox->vport = vport;
5638 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5639 if (rc == MBX_NOT_FINISHED) {
5640 mempool_free(mbox, phba->mbox_mem_pool);
5641 lpfc_disc_flush_list(vport);
5642 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5643 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5644 phba->link_state = LPFC_HBA_ERROR;
5645 }
5646 }
5647}
5648
5649
5650void
5651lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5652{
5653 LPFC_MBOXQ_t *regvpimbox;
5654
5655 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5656 if (regvpimbox) {
5657 lpfc_reg_vpi(vport, regvpimbox);
5658 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
5659 regvpimbox->vport = vport;
5660 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
5661 == MBX_NOT_FINISHED) {
5662 mempool_free(regvpimbox, phba->mbox_mem_pool);
5663 }
5664 }
5665}
5666
5667
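/*
 * (Re)start discovery on @vport: restart the discovery timer, issue
 * ADISCs to eligible NPR nodes first, then PLOGIs to the rest.
 */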
5668void
5669lpfc_disc_start(struct lpfc_vport *vport)
5670{
5671 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5672 struct lpfc_hba *phba = vport->phba;
5673 uint32_t num_sent;
5674 uint32_t clear_la_pending;
5675
5676 if (!lpfc_is_link_up(phba)) {
5677 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5678 "3315 Link is not up %x\n",
5679 phba->link_state);
5680 return;
5681 }
5682
5683 if (phba->link_state == LPFC_CLEAR_LA)
5684 clear_la_pending = 1;
5685 else
5686 clear_la_pending = 0;
5687
5688 if (vport->port_state < LPFC_VPORT_READY)
5689 vport->port_state = LPFC_DISC_AUTH;
5690
5691 lpfc_set_disctmo(vport);
5692
5693 vport->fc_prevDID = vport->fc_myDID;
5694 vport->num_disc_nodes = 0;
5695
5696
5697 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5698 "0202 Start Discovery port state x%x "
5699 "flg x%x Data: x%x x%x x%x\n",
5700 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
5701 vport->fc_adisc_cnt, vport->fc_npr_cnt);
5702
5703
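	/* First do ADISCs - if any */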
5704 num_sent = lpfc_els_disc_adisc(vport);
5705
5706 if (num_sent)
5707 return;
5708
5709
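	/* Register the VPI for SLI3, NPIV only */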
5710 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5711 !(vport->fc_flag & FC_PT2PT) &&
5712 !(vport->fc_flag & FC_RSCN_MODE) &&
5713 (phba->sli_rev < LPFC_SLI_REV4)) {
5714 lpfc_issue_clear_la(phba, vport);
5715 lpfc_issue_reg_vpi(phba, vport);
5716 return;
5717 }
5722
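	/*
	 * On SLI2, or SLI3 without NPIV, finish discovery here and set
	 * the port state to READY.
	 */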
5723 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
5724
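		/* If we get here, there is nothing to ADISC */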
5725 lpfc_issue_clear_la(phba, vport);
5726
5727 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
5728 vport->num_disc_nodes = 0;
5729
5730 if (vport->fc_npr_cnt)
5731 lpfc_els_disc_plogi(vport);
5732
5733 if (!vport->num_disc_nodes) {
5734 spin_lock_irq(shost->host_lock);
5735 vport->fc_flag &= ~FC_NDISC_ACTIVE;
5736 spin_unlock_irq(shost->host_lock);
5737 lpfc_can_disctmo(vport);
5738 }
5739 }
5740 vport->port_state = LPFC_VPORT_READY;
5741 } else {
5742
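		/* Next do PLOGIs - if any */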
5743 num_sent = lpfc_els_disc_plogi(vport);
5744
5745 if (num_sent)
5746 return;
5747
5748 if (vport->fc_flag & FC_RSCN_MODE) {
5751
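			/* Check whether more RSCNs arrived while this
			 * one was being processed.
			 */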
5752 if ((vport->fc_rscn_id_cnt == 0) &&
5753 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
5754 spin_lock_irq(shost->host_lock);
5755 vport->fc_flag &= ~FC_RSCN_MODE;
5756 spin_unlock_irq(shost->host_lock);
5757 lpfc_can_disctmo(vport);
5758 } else
5759 lpfc_els_handle_rscn(vport);
5760 }
5761 }
5762 return;
5763}
5768
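/*
 * Flush ELS iocbs associated with @ndlp: cancel those still on the txq
 * and abort those already issued on the txcmplq.
 */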
5769static void
5770lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5771{
5772 LIST_HEAD(completions);
5773 IOCB_t *icmd;
5774 struct lpfc_iocbq *iocb, *next_iocb;
5775 struct lpfc_sli_ring *pring;
5776
5777 pring = lpfc_phba_elsring(phba);
5778 if (unlikely(!pring))
5779 return;
5780
5781
5782
5783
5784 spin_lock_irq(&phba->hbalock);
5785 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
5786 if (iocb->context1 != ndlp) {
5787 continue;
5788 }
5789 icmd = &iocb->iocb;
5790 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
5791 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
5792
5793 list_move_tail(&iocb->list, &completions);
5794 }
5795 }
5796
5797
5798 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
5799 if (iocb->context1 != ndlp) {
5800 continue;
5801 }
5802 icmd = &iocb->iocb;
5803 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
5804 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
5805 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
5806 }
5807 }
5808 spin_unlock_irq(&phba->hbalock);
5809
5810
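	/* Cancel all the IOCBs from the completions list */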
5811 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5812 IOERR_SLI_ABORTED);
5813}
5814
5815static void
5816lpfc_disc_flush_list(struct lpfc_vport *vport)
5817{
5818 struct lpfc_nodelist *ndlp, *next_ndlp;
5819 struct lpfc_hba *phba = vport->phba;
5820
5821 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
5822 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5823 nlp_listp) {
5824 if (!NLP_CHK_NODE_ACT(ndlp))
5825 continue;
5826 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5827 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
5828 lpfc_free_tx(phba, ndlp);
5829 }
5830 }
5831 }
5832}
5833
5834void
5835lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
5836{
5837 lpfc_els_flush_rscn(vport);
5838 lpfc_els_flush_cmd(vport);
5839 lpfc_disc_flush_list(vport);
5840}
5856
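/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/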
5857void
5858lpfc_disc_timeout(struct timer_list *t)
5859{
5860 struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
5861 struct lpfc_hba *phba = vport->phba;
5862 uint32_t tmo_posted;
5863 unsigned long flags = 0;
5864
5865 if (unlikely(!phba))
5866 return;
5867
5868 spin_lock_irqsave(&vport->work_port_lock, flags);
5869 tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
5870 if (!tmo_posted)
5871 vport->work_port_events |= WORKER_DISC_TMO;
5872 spin_unlock_irqrestore(&vport->work_port_lock, flags);
5873
5874 if (!tmo_posted)
5875 lpfc_worker_wake_up(phba);
5876 return;
5877}
5878
5879static void
5880lpfc_disc_timeout_handler(struct lpfc_vport *vport)
5881{
5882 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5883 struct lpfc_hba *phba = vport->phba;
5884 struct lpfc_sli *psli = &phba->sli;
5885 struct lpfc_nodelist *ndlp, *next_ndlp;
5886 LPFC_MBOXQ_t *initlinkmbox;
5887 int rc, clrlaerr = 0;
5888
5889 if (!(vport->fc_flag & FC_DISC_TMO))
5890 return;
5891
5892 spin_lock_irq(shost->host_lock);
5893 vport->fc_flag &= ~FC_DISC_TMO;
5894 spin_unlock_irq(shost->host_lock);
5895
5896 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5897 "disc timeout: state:x%x rtry:x%x flg:x%x",
5898 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5899
5900 switch (vport->port_state) {
5901
5902 case LPFC_LOCAL_CFG_LINK:
5906
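		/*
		 * port_state stays LPFC_LOCAL_CFG_LINK while waiting
		 * for the FAN timeout.
		 */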
5907 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
5908 "0221 FAN timeout\n");
5909
5910
5911 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5912 nlp_listp) {
5913 if (!NLP_CHK_NODE_ACT(ndlp))
5914 continue;
5915 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
5916 continue;
5917 if (ndlp->nlp_type & NLP_FABRIC) {
5918
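				/* Clean up the ndlp on Fabric connections */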
5919 lpfc_drop_node(vport, ndlp);
5920
5921 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
5924
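				/* Fail outstanding I/O now since the
				 * device is marked for PLOGI.
				 */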
5925 lpfc_unreg_rpi(vport, ndlp);
5926 }
5927 }
5928 if (vport->port_state != LPFC_FLOGI) {
5929 if (phba->sli_rev <= LPFC_SLI_REV3)
5930 lpfc_initial_flogi(vport);
5931 else
5932 lpfc_issue_init_vfi(vport);
5933 return;
5934 }
5935 break;
5936
5937 case LPFC_FDISC:
5938 case LPFC_FLOGI:
5939
5940
5941 lpfc_printf_vlog(vport, KERN_ERR,
5942 LOG_TRACE_EVENT,
5943 "0222 Initial %s timeout\n",
5944 vport->vpi ? "FDISC" : "FLOGI");
5950
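		/* FLOGI failed, so just use the loop map to build the
		 * discovery list.
		 */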
5951 lpfc_disc_list_loopmap(vport);
5952
5953
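		/* Start discovery */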
5954 lpfc_disc_start(vport);
5955 break;
5956
5957 case LPFC_FABRIC_CFG_LINK:
5958
5959
5960 lpfc_printf_vlog(vport, KERN_ERR,
5961 LOG_TRACE_EVENT,
5962 "0223 Timeout while waiting for "
5963 "NameServer login\n");
5964
5965 ndlp = lpfc_findnode_did(vport, NameServer_DID);
5966 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5967 lpfc_els_abort(phba, ndlp);
5968
5969
5970 goto restart_disc;
5971
5972 case LPFC_NS_QRY:
5973
5974 lpfc_printf_vlog(vport, KERN_ERR,
5975 LOG_TRACE_EVENT,
5976 "0224 NameServer Query timeout "
5977 "Data: x%x x%x\n",
5978 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5979
5980 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
5981
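			/* Try it one more time */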
5982 vport->fc_ns_retry++;
5983 vport->gidft_inp = 0;
5984 rc = lpfc_issue_gidft(vport);
5985 if (rc == 0)
5986 break;
5987 }
5988 vport->fc_ns_retry = 0;
5989
5990restart_disc:
5995
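		/*
		 * Discovery is over.  With NPIV on SLI3 the REG_VPI
		 * completion moves the port to READY; otherwise issue
		 * CLEAR_LA and set READY here.
		 */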
5996 if (phba->sli_rev < LPFC_SLI_REV4) {
5997 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5998 lpfc_issue_reg_vpi(phba, vport);
5999 else {
6000 lpfc_issue_clear_la(phba, vport);
6001 vport->port_state = LPFC_VPORT_READY;
6002 }
6003 }
6004
6005
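		/* Setup and issue mailbox INITIALIZE LINK command */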
6006 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6007 if (!initlinkmbox) {
6008 lpfc_printf_vlog(vport, KERN_ERR,
6009 LOG_TRACE_EVENT,
6010 "0206 Device Discovery "
6011 "completion error\n");
6012 phba->link_state = LPFC_HBA_ERROR;
6013 break;
6014 }
6015
6016 lpfc_linkdown(phba);
6017 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
6018 phba->cfg_link_speed);
6019 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
6020 initlinkmbox->vport = vport;
6021 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6022 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
6023 lpfc_set_loopback_flag(phba);
6024 if (rc == MBX_NOT_FINISHED)
6025 mempool_free(initlinkmbox, phba->mbox_mem_pool);
6026
6027 break;
6028
6029 case LPFC_DISC_AUTH:
6030
6031 lpfc_printf_vlog(vport, KERN_ERR,
6032 LOG_TRACE_EVENT,
6033 "0227 Node Authentication timeout\n");
6034 lpfc_disc_flush_list(vport);
6035
6036
6037
6038
6039
6040 if (phba->sli_rev < LPFC_SLI_REV4) {
6041 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
6042 lpfc_issue_reg_vpi(phba, vport);
6043 else {
6044 lpfc_issue_clear_la(phba, vport);
6045 vport->port_state = LPFC_VPORT_READY;
6046 }
6047 }
6048 break;
6049
6050 case LPFC_VPORT_READY:
6051 if (vport->fc_flag & FC_RSCN_MODE) {
6052 lpfc_printf_vlog(vport, KERN_ERR,
6053 LOG_TRACE_EVENT,
6054 "0231 RSCN timeout Data: x%x "
6055 "x%x\n",
6056 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
6057
6058
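			/* Cleanup any outstanding ELS commands */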
6059 lpfc_els_flush_cmd(vport);
6060
6061 lpfc_els_flush_rscn(vport);
6062 lpfc_disc_flush_list(vport);
6063 }
6064 break;
6065
6066 default:
6067 lpfc_printf_vlog(vport, KERN_ERR,
6068 LOG_TRACE_EVENT,
6069 "0273 Unexpected discovery timeout, "
6070 "vport State x%x\n", vport->port_state);
6071 break;
6072 }
6073
6074 switch (phba->link_state) {
6075 case LPFC_CLEAR_LA:
6076
6077 lpfc_printf_vlog(vport, KERN_ERR,
6078 LOG_TRACE_EVENT,
6079 "0228 CLEAR LA timeout\n");
6080 clrlaerr = 1;
6081 break;
6082
6083 case LPFC_LINK_UP:
6084 lpfc_issue_clear_la(phba, vport);
6085 fallthrough;
6086 case LPFC_LINK_UNKNOWN:
6087 case LPFC_WARM_START:
6088 case LPFC_INIT_START:
6089 case LPFC_INIT_MBX_CMDS:
6090 case LPFC_LINK_DOWN:
6091 case LPFC_HBA_ERROR:
6092 lpfc_printf_vlog(vport, KERN_ERR,
6093 LOG_TRACE_EVENT,
6094 "0230 Unexpected timeout, hba link "
6095 "state x%x\n", phba->link_state);
6096 clrlaerr = 1;
6097 break;
6098
6099 case LPFC_HBA_READY:
6100 break;
6101 }
6102
6103 if (clrlaerr) {
6104 lpfc_disc_flush_list(vport);
6105 if (phba->sli_rev != LPFC_SLI_REV4) {
			psli->sli3_ring[LPFC_EXTRA_RING].flag &=
				~LPFC_STOP_IOCB_EVENT;
6108 psli->sli3_ring[LPFC_FCP_RING].flag &=
6109 ~LPFC_STOP_IOCB_EVENT;
6110 }
6111 vport->port_state = LPFC_VPORT_READY;
6112 }
6113 return;
6114}
6121
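/*
 * Completion handler for the FDMI rport REG_LOGIN mailbox command; set
 * up in the LPFC_MBOXQ when the command is handed to the SLI layer.
 */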
6122void
6123lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6124{
6125 MAILBOX_t *mb = &pmb->u.mb;
6126 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
6127 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
6128 struct lpfc_vport *vport = pmb->vport;
6129
6130 pmb->ctx_buf = NULL;
6131 pmb->ctx_ndlp = NULL;
6132
6133 if (phba->sli_rev < LPFC_SLI_REV4)
6134 ndlp->nlp_rpi = mb->un.varWords[0];
6135 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
6136 ndlp->nlp_type |= NLP_FABRIC;
6137 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
6138 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
6139 "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
6140 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
6141 kref_read(&ndlp->kref),
6142 ndlp->nlp_usg_map, ndlp);
6148
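	/*
	 * Start issuing FDMI commands: the physical port begins with
	 * DHBA, vports with DPRT.
	 */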
6149 if (vport->port_type == LPFC_PHYSICAL_PORT)
6150 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
6151 else
6152 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
6157
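	/* Drop the node reference taken for this callback */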
6158 lpfc_nlp_put(ndlp);
6159 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6160 kfree(mp);
6161 mempool_free(pmb, phba->mbox_mem_pool);
6162
6163 return;
6164}
6165
6166static int
6167lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
6168{
6169 uint16_t *rpi = param;
6170
6171
6172 if (!NLP_CHK_NODE_ACT(ndlp))
6173 return 0;
6174
6175 return ndlp->nlp_rpi == *rpi;
6176}
6177
6178static int
6179lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
6180{
6181 return memcmp(&ndlp->nlp_portname, param,
6182 sizeof(ndlp->nlp_portname)) == 0;
6183}
6184
6185static struct lpfc_nodelist *
6186__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
6187{
6188 struct lpfc_nodelist *ndlp;
6189
6190 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6191 if (filter(ndlp, param)) {
6192 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6193 "3185 FIND node filter %ps DID "
6194 "ndlp x%px did x%x flg x%x st x%x "
6195 "xri x%x type x%x rpi x%x\n",
6196 filter, ndlp, ndlp->nlp_DID,
6197 ndlp->nlp_flag, ndlp->nlp_state,
6198 ndlp->nlp_xri, ndlp->nlp_type,
6199 ndlp->nlp_rpi);
6200 return ndlp;
6201 }
6202 }
6203 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6204 "3186 FIND node filter %ps NOT FOUND.\n", filter);
6205 return NULL;
6206}
6211
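/*
 * Search the vport node list for the given RPI; the caller must hold
 * the host lock.  Returns the node or NULL.
 */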
6212struct lpfc_nodelist *
6213__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6214{
6215 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
6216}
6221
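/*
 * Search the vport node list for the given WWPN; returns the node or
 * NULL.
 */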
6222struct lpfc_nodelist *
6223lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
6224{
6225 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6226 struct lpfc_nodelist *ndlp;
6227
6228 spin_lock_irq(shost->host_lock);
6229 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
6230 spin_unlock_irq(shost->host_lock);
6231 return ndlp;
6232}
6238
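/*
 * Locking wrapper around __lpfc_findnode_rpi().
 */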
6239struct lpfc_nodelist *
6240lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6241{
6242 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6243 struct lpfc_nodelist *ndlp;
6244 unsigned long flags;
6245
6246 spin_lock_irqsave(shost->host_lock, flags);
6247 ndlp = __lpfc_findnode_rpi(vport, rpi);
6248 spin_unlock_irqrestore(shost->host_lock, flags);
6249 return ndlp;
6250}
6264
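/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * Walks the HBA's vport list and returns the vport whose (logical) vpi
 * matches @vpi, or NULL when no match is found.
 */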
6265struct lpfc_vport *
6266lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6267{
6268 struct lpfc_vport *vport;
6269 unsigned long flags;
6270 int i = 0;
6271
6272
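	/* The physical port is always vpi 0 - no translation is needed */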
6273 if (vpi > 0) {
6277
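		/*
		 * Translate the physical vpi to the logical vpi; the
		 * vport stores the logical vpi.
		 */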
6278 for (i = 0; i < phba->max_vpi; i++) {
6279 if (vpi == phba->vpi_ids[i])
6280 break;
6281 }
6282
6283 if (i >= phba->max_vpi) {
6284 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6285 "2936 Could not find Vport mapped "
6286 "to vpi %d\n", vpi);
6287 return NULL;
6288 }
6289 }
6290
6291 spin_lock_irqsave(&phba->port_list_lock, flags);
6292 list_for_each_entry(vport, &phba->port_list, listentry) {
6293 if (vport->vpi == i) {
6294 spin_unlock_irqrestore(&phba->port_list_lock, flags);
6295 return vport;
6296 }
6297 }
6298 spin_unlock_irqrestore(&phba->port_list_lock, flags);
6299 return NULL;
6300}
6301
6302struct lpfc_nodelist *
6303lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
6304{
6305 struct lpfc_nodelist *ndlp;
6306 int rpi = LPFC_RPI_ALLOC_ERROR;
6307
6308 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
6309 rpi = lpfc_sli4_alloc_rpi(vport->phba);
6310 if (rpi == LPFC_RPI_ALLOC_ERROR)
6311 return NULL;
6312 }
6313
6314 ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
6315 if (!ndlp) {
6316 if (vport->phba->sli_rev == LPFC_SLI_REV4)
6317 lpfc_sli4_free_rpi(vport->phba, rpi);
6318 return NULL;
6319 }
6320
	memset(ndlp, 0, sizeof(struct lpfc_nodelist));
6322
6323 lpfc_initialize_node(vport, ndlp, did);
6324 INIT_LIST_HEAD(&ndlp->nlp_listp);
6325 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
6326 ndlp->nlp_rpi = rpi;
6327 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
6328 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
6329 "flg:x%x refcnt:%d map:x%x\n",
6330 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
6331 ndlp->nlp_flag, kref_read(&ndlp->kref),
6332 ndlp->nlp_usg_map);
6333
6334 ndlp->active_rrqs_xri_bitmap =
6335 mempool_alloc(vport->phba->active_rrq_pool,
6336 GFP_KERNEL);
6337 if (ndlp->active_rrqs_xri_bitmap)
6338 memset(ndlp->active_rrqs_xri_bitmap, 0,
6339 ndlp->phba->cfg_rrq_xri_bitmap_sz);
6340 }
6341
6342
6343
6344 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
6345 "node init: did:x%x",
6346 ndlp->nlp_DID, 0, 0);
6347
6348 return ndlp;
6349}
6350
6351
6352
6353
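/*
 * Release all resources associated with a node once its reference
 * count reaches zero, then return the memory to the mempool.
 */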
6354static void
6355lpfc_nlp_release(struct kref *kref)
6356{
6357 struct lpfc_hba *phba;
6358 unsigned long flags;
6359 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
6360 kref);
6361
6362 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6363 "node release: did:x%x flg:x%x type:x%x",
6364 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6365
6366 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
6367 "0279 %s: ndlp:x%px did %x "
6368 "usgmap:x%x refcnt:%d rpi:%x\n",
6369 __func__,
6370 (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
6371 kref_read(&ndlp->kref), ndlp->nlp_rpi);
6372
6373
6374 lpfc_nlp_remove(ndlp->vport, ndlp);
6375
6376
6377 phba = ndlp->phba;
6378 spin_lock_irqsave(&phba->ndlp_lock, flags);
6379 NLP_CLR_NODE_ACT(ndlp);
6380 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6381
6382
6383 if (NLP_CHK_FREE_REQ(ndlp)) {
6384 kfree(ndlp->lat_data);
6385 if (phba->sli_rev == LPFC_SLI_REV4)
6386 mempool_free(ndlp->active_rrqs_xri_bitmap,
6387 ndlp->phba->active_rrq_pool);
6388 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6389 }
6390}
6395
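/*
 * Bump the reference count on a node so that one discovery thread
 * cannot free it while another is still using it.
 */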
6396struct lpfc_nodelist *
6397lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6398{
6399 struct lpfc_hba *phba;
6400 unsigned long flags;
6401
6402 if (ndlp) {
6403 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6404 "node get: did:x%x flg:x%x refcnt:x%x",
6405 ndlp->nlp_DID, ndlp->nlp_flag,
6406 kref_read(&ndlp->kref));
6410
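		/* Don't bump the reference on a node that is already
		 * being released.
		 */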
6411 phba = ndlp->phba;
6412 spin_lock_irqsave(&phba->ndlp_lock, flags);
6413 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
6414 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6415 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6416 "0276 %s: ndlp:x%px "
6417 "usgmap:x%x refcnt:%d\n",
6418 __func__, (void *)ndlp, ndlp->nlp_usg_map,
6419 kref_read(&ndlp->kref));
6420 return NULL;
6421 } else
6422 kref_get(&ndlp->kref);
6423 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6424 }
6425 return ndlp;
6426}
6433
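/*
 * Drop a reference on a node.  Returns 1 when the node resource has
 * been released, 0 when it is still held.
 */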
6434int
6435lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6436{
6437 struct lpfc_hba *phba;
6438 unsigned long flags;
6439
6440 if (!ndlp)
6441 return 1;
6442
6443 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6444 "node put: did:x%x flg:x%x refcnt:x%x",
6445 ndlp->nlp_DID, ndlp->nlp_flag,
6446 kref_read(&ndlp->kref));
6447 phba = ndlp->phba;
6448 spin_lock_irqsave(&phba->ndlp_lock, flags);
6452
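	/* Guard against a second kref_put racing in after the node
	 * memory has already been freed.
	 */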
6453 if (NLP_CHK_FREE_ACK(ndlp)) {
6454 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6455 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6456 "0274 %s: ndlp:x%px "
6457 "usgmap:x%x refcnt:%d\n",
6458 __func__, (void *)ndlp, ndlp->nlp_usg_map,
6459 kref_read(&ndlp->kref));
6460 return 1;
6461 }
6465
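	/* Likewise guard against a put on a node that is already being
	 * inactivated.
	 */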
6466 if (NLP_CHK_IACT_REQ(ndlp)) {
6467 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6468 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6469 "0275 %s: ndlp:x%px "
6470 "usgmap:x%x refcnt:%d\n",
6471 __func__, (void *)ndlp, ndlp->nlp_usg_map,
6472 kref_read(&ndlp->kref));
6473 return 1;
6474 }
6479
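	/* On the last put, mark the usage flags so no other get/put can
	 * slip in while the final kref_put runs.
	 */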
6480 if (kref_read(&ndlp->kref) == 1) {
6481
6482 NLP_SET_IACT_REQ(ndlp);
6483
6484 if (NLP_CHK_FREE_REQ(ndlp))
6485 NLP_SET_FREE_ACK(ndlp);
6486 }
6487 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
6493
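	/* kref_put returns 1 once the count reaches 0 and
	 * lpfc_nlp_release() has run.
	 */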
6494 return kref_put(&ndlp->kref, lpfc_nlp_release);
6495}
6501
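/*
 * Free the node if no other thread holds a reference.  Returns 1 if
 * the node was freed, 0 if it is still in use.
 */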
6502int
6503lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
6504{
6505 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6506 "node not used: did:x%x flg:x%x refcnt:x%x",
6507 ndlp->nlp_DID, ndlp->nlp_flag,
6508 kref_read(&ndlp->kref));
6509 if (kref_read(&ndlp->kref) == 1)
6510 if (lpfc_nlp_put(ndlp))
6511 return 1;
6512 return 0;
6513}
6524
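/**
 * lpfc_fcf_inuse - Check whether the registered FCF is still in use
 * @phba: Pointer to hba context object.
 *
 * Walks every node on every vport; the FCF is considered in use when
 * any node still has an FCP-target fc_rport attached or a registered
 * RPI.
 */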
6525static int
6526lpfc_fcf_inuse(struct lpfc_hba *phba)
6527{
6528 struct lpfc_vport **vports;
6529 int i, ret = 0;
6530 struct lpfc_nodelist *ndlp;
6531 struct Scsi_Host *shost;
6532
6533 vports = lpfc_create_vport_work_array(phba);
6534
6535
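	/* If the work array cannot be allocated, report the FCF as in use */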
6536 if (!vports)
6537 return 1;
6538
6539 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6540 shost = lpfc_shost_from_vport(vports[i]);
6541 spin_lock_irq(shost->host_lock);
6547
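		/*
		 * If CVL_RCVD is not set we have sent a FLOGI; don't
		 * unregister the FCF while dev_loss may still fire.
		 */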
6548 if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
6549 spin_unlock_irq(shost->host_lock);
6550 ret = 1;
6551 goto out;
6552 }
6553 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
6554 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
6555 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
6556 ret = 1;
6557 spin_unlock_irq(shost->host_lock);
6558 goto out;
6559 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
6560 ret = 1;
6561 lpfc_printf_log(phba, KERN_INFO,
6562 LOG_NODE | LOG_DISCOVERY,
6563 "2624 RPI %x DID %x flag %x "
6564 "still logged in\n",
6565 ndlp->nlp_rpi, ndlp->nlp_DID,
6566 ndlp->nlp_flag);
6567 }
6568 }
6569 spin_unlock_irq(shost->host_lock);
6570 }
6571out:
6572 lpfc_destroy_vport_work_array(phba, vports);
6573 return ret;
6574}
6582
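/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * Clears FC_VFI_REGISTERED and frees the mailbox command.
 */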
6583void
6584lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6585{
6586 struct lpfc_vport *vport = mboxq->vport;
6587 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6588
6589 if (mboxq->u.mb.mbxStatus) {
6590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6591 "2555 UNREG_VFI mbxStatus error x%x "
6592 "HBA state x%x\n",
6593 mboxq->u.mb.mbxStatus, vport->port_state);
6594 }
6595 spin_lock_irq(shost->host_lock);
6596 phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
6597 spin_unlock_irq(shost->host_lock);
6598 mempool_free(mboxq, phba->mbox_mem_pool);
6599 return;
6600}
6608
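/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * Frees memory associated with the mailbox command.
 */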
6609static void
6610lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6611{
6612 struct lpfc_vport *vport = mboxq->vport;
6613
6614 if (mboxq->u.mb.mbxStatus) {
6615 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6616 "2550 UNREG_FCFI mbxStatus error x%x "
6617 "HBA state x%x\n",
6618 mboxq->u.mb.mbxStatus, vport->port_state);
6619 }
6620 mempool_free(mboxq, phba->mbox_mem_pool);
6621 return;
6622}
6631
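/**
 * lpfc_unregister_fcf_prep - Prepare to unregister the HBA's FCF record
 * @phba: Pointer to hba context object.
 *
 * Unregisters, in order, the RPIs, the VPIs and finally the VFI
 * associated with the currently registered FCF.
 */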
6632int
6633lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
6634{
6635 struct lpfc_vport **vports;
6636 struct lpfc_nodelist *ndlp;
6637 struct Scsi_Host *shost;
6638 int i = 0, rc;
6639
6640
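	/* Unregister RPIs */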
6641 if (lpfc_fcf_inuse(phba))
6642 lpfc_unreg_hba_rpis(phba);
6643
6644
6645 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
6646
6647
6648 vports = lpfc_create_vport_work_array(phba);
6649 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
6650 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6651
6652 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6653 if (ndlp)
6654 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6655 lpfc_cleanup_pending_mbox(vports[i]);
6656 if (phba->sli_rev == LPFC_SLI_REV4)
6657 lpfc_sli4_unreg_all_rpis(vports[i]);
6658 lpfc_mbx_unreg_vpi(vports[i]);
6659 shost = lpfc_shost_from_vport(vports[i]);
6660 spin_lock_irq(shost->host_lock);
6661 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6662 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
6663 spin_unlock_irq(shost->host_lock);
6664 }
6665 lpfc_destroy_vport_work_array(phba, vports);
6666 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
6667 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6668 if (ndlp)
6669 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
6670 lpfc_cleanup_pending_mbox(phba->pport);
6671 if (phba->sli_rev == LPFC_SLI_REV4)
6672 lpfc_sli4_unreg_all_rpis(phba->pport);
6673 lpfc_mbx_unreg_vpi(phba->pport);
6674 shost = lpfc_shost_from_vport(phba->pport);
6675 spin_lock_irq(shost->host_lock);
6676 phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6677 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
6678 spin_unlock_irq(shost->host_lock);
6679 }
6680
6681
6682 lpfc_els_flush_all_cmd(phba);
6683
6684
6685 rc = lpfc_issue_unreg_vfi(phba->pport);
6686 return rc;
6687}
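/**
 * lpfc_sli4_unregister_fcf - Issue an UNREG_FCFI mailbox command
 * @phba: pointer to the HBA data structure.
 *
 * Allocates a mailbox from the pool, builds an UNREG_FCFI command for
 * the currently registered FCF index, and issues it asynchronously.
 *
 * Return: 0 on success, -ENOMEM if no mailbox could be allocated, or
 * -EINVAL if the mailbox could not be issued.
 **/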
int
lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2551 UNREG_FCFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2552 Unregister FCFI command failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		/* Mailbox was never queued, so the completion handler will
		 * not run; free it here to avoid leaking the pool entry.
		 */
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EINVAL;
	}
	return 0;
}
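/**
 * lpfc_unregister_fcf_rescan - Unregister the FCF and rescan for a new one
 * @phba: pointer to the HBA data structure.
 *
 * Unregisters the currently registered FCF record and, unless the driver
 * is unloading or the link is down, kicks off a fresh FCF table scan so
 * that discovery can resume on a new FCF.
 **/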
void
lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering the FCF */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2748 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister the FCF record */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;
	/* Reset the HBA FCF state after a successful unregister */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/*
	 * Only rescan for another usable FCF record if the driver is not
	 * unloading and the link is still up.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
	    (phba->link_state < LPFC_LINK_UP))
		return;

	/* This is considered the initial FCF discovery scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_INIT_DISC;
	spin_unlock_irq(&phba->hbalock);

	/* Reset the FCF roundrobin bmask for the new discovery */
	lpfc_sli4_clear_fcf_rr_bmask(phba);

	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2553 lpfc_unregister_unused_fcf failed "
				"to read FCF record HBA state x%x\n",
				phba->pport->port_state);
	}
}
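/**
 * lpfc_unregister_fcf - Unregister the currently registered FCF record
 * @phba: pointer to the HBA data structure.
 *
 * Unregisters the currently registered FCF record and clears the
 * FCF_REGISTERED flag, without starting a new FCF table scan.
 **/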
void
lpfc_unregister_fcf(struct lpfc_hba *phba)
{
	int rc;

	/* Preparation for unregistering the FCF */
	rc = lpfc_unregister_fcf_prep(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2749 Failed to prepare for unregistering "
				"HBA's FCF record: rc=%d\n", rc);
		return;
	}

	/* Now, unregister the FCF record and clear the registered flag */
	rc = lpfc_sli4_unregister_fcf(phba);
	if (rc)
		return;

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);
}
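/**
 * lpfc_unregister_unused_fcf - Unregister the FCF if all devices are gone
 * @phba: pointer to the HBA data structure.
 *
 * If the HBA is running in FCoE/FIP mode, has a registered FCF, is not
 * in the middle of FCF discovery or FLOGI, and no node still uses the
 * FCF, unregister it and rescan the FCF table.
 **/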
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	/*
	 * If the HBA is not running in FCoE/FIP mode, if no FCF is
	 * registered, if FCF discovery is ongoing, or if the port is
	 * in the middle of FLOGI, do nothing.
	 */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
	    (phba->pport->port_state == LPFC_FLOGI)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	lpfc_unregister_fcf_rescan(phba);
}
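/**
 * lpfc_read_fcf_conn_tbl - Build the driver FCF connection table
 * @phba: pointer to the HBA data structure.
 * @buff: buffer containing the FCF connection table record from
 *        config region 23.
 *
 * Frees the current connection table and rebuilds it from the valid
 * records in the buffer, then logs every entry that was added.
 **/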
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
		       uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}

	/* Header length is in words; each record has a fixed size */
	conn_hdr = (struct lpfc_fcf_conn_hdr *)buff;
	record_count = conn_hdr->length * sizeof(uint32_t) /
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	/* Copy each valid record into a newly allocated list entry */
	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
				     GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2566 Failed to allocate connection"
					" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
		       sizeof(struct lpfc_fcf_conn_rec));
		list_add_tail(&conn_entry->list,
			      &phba->fcf_conn_rec_list);
	}

	if (!list_empty(&phba->fcf_conn_rec_list)) {
		i = 0;
		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
				    list) {
			conn_rec = &conn_entry->conn_rec;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3345 FCF connection list rec[%02d]: "
					"flags:x%04x, vtag:x%04x, "
					"fabric_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x, "
					"switch_name:x%02x:%02x:%02x:%02x:"
					"%02x:%02x:%02x:%02x\n", i++,
					conn_rec->flags, conn_rec->vlan_tag,
					conn_rec->fabric_name[0],
					conn_rec->fabric_name[1],
					conn_rec->fabric_name[2],
					conn_rec->fabric_name[3],
					conn_rec->fabric_name[4],
					conn_rec->fabric_name[5],
					conn_rec->fabric_name[6],
					conn_rec->fabric_name[7],
					conn_rec->switch_name[0],
					conn_rec->switch_name[1],
					conn_rec->switch_name[2],
					conn_rec->switch_name[3],
					conn_rec->switch_name[4],
					conn_rec->switch_name[5],
					conn_rec->switch_name[6],
					conn_rec->switch_name[7]);
		}
	}
}
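/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23
 * @phba: pointer to the HBA data structure.
 * @buff: buffer containing the FCoE parameter record.
 *
 * Validates the FIP parameter header version and length, then copies
 * the VLAN tag (when valid) and the FC map into the HBA structure.
 **/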
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
		     uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)buff;
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
	    (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
}
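/**
 * lpfc_get_rec_conf23 - Find a record of a given type in config region 23
 * @buff: buffer containing config region 23 data.
 * @size: size of the buffer, in bytes.
 * @rec_type: record type to search for.
 *
 * Walks the TLV-style records in the buffer. Returns a pointer to the
 * first record of the requested type, or NULL if the last-record marker
 * or the end of the buffer is reached first.
 **/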
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
	    (size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * Each record is a one-word type/length header followed by
	 * rec_length words of data; stop before stepping past the end
	 * of the buffer.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
	       <= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}
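/**
 * lpfc_parse_fcoe_conf - Parse the FCoE configuration in config region 23
 * @phba: pointer to the HBA data structure.
 * @buff: buffer containing config region 23 data.
 * @size: size of the buffer, in bytes.
 *
 * Verifies the region 23 signature and version, then reads the FCoE
 * parameter record and the FCF connection table record, if present.
 **/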
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
		     uint8_t *buff,
		     uint32_t size)
{
	uint32_t offset = 0;
	uint8_t *rec_ptr;

	/*
	 * If the data size is less than two words, the signature and
	 * version cannot be verified.
	 */
	if (size < 2 * sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	/* Read the FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read the FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
				      size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
}