1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/blkdev.h>
25#include <linux/delay.h>
26#include <linux/slab.h>
27#include <linux/pci.h>
28#include <linux/kthread.h>
29#include <linux/interrupt.h>
30#include <linux/lockdep.h>
31#include <linux/utsname.h>
32
33#include <scsi/scsi.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_transport_fc.h>
37#include <scsi/fc/fc_fs.h>
38
39#include "lpfc_hw4.h"
40#include "lpfc_hw.h"
41#include "lpfc_nl.h"
42#include "lpfc_disc.h"
43#include "lpfc_sli.h"
44#include "lpfc_sli4.h"
45#include "lpfc.h"
46#include "lpfc_scsi.h"
47#include "lpfc_nvme.h"
48#include "lpfc_logmsg.h"
49#include "lpfc_crtn.h"
50#include "lpfc_vport.h"
51#include "lpfc_debugfs.h"
52
53
54static uint8_t lpfcAlpaArray[] = {
55 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
56 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
57 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
58 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
59 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
60 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
61 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
62 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
63 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
64 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
65 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
66 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
67 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
68};
69
70static void lpfc_disc_timeout_handler(struct lpfc_vport *);
71static void lpfc_disc_flush_list(struct lpfc_vport *vport);
72static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
73static int lpfc_fcf_inuse(struct lpfc_hba *);
74static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
75static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
76static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);
77
78static int
79lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
80{
81 if (ndlp->nlp_fc4_type ||
82 ndlp->nlp_type & NLP_FABRIC)
83 return 1;
84 return 0;
85}
86
87
88
89
90
91
92static int
93lpfc_rport_invalid(struct fc_rport *rport)
94{
95 struct lpfc_rport_data *rdata;
96 struct lpfc_nodelist *ndlp;
97
98 if (!rport) {
99 pr_err("**** %s: NULL rport, exit.\n", __func__);
100 return -EINVAL;
101 }
102
103 rdata = rport->dd_data;
104 if (!rdata) {
105 pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
106 __func__, rport, rport->scsi_target_id);
107 return -EINVAL;
108 }
109
110 ndlp = rdata->pnode;
111 if (!rdata->pnode) {
112 pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
113 __func__, rport, rport->scsi_target_id);
114 return -EINVAL;
115 }
116
117 if (!ndlp->vport) {
118 pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
119 "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
120 rport->scsi_target_id);
121 return -EINVAL;
122 }
123 return 0;
124}
125
126void
127lpfc_terminate_rport_io(struct fc_rport *rport)
128{
129 struct lpfc_rport_data *rdata;
130 struct lpfc_nodelist *ndlp;
131 struct lpfc_vport *vport;
132
133 if (lpfc_rport_invalid(rport))
134 return;
135
136 rdata = rport->dd_data;
137 ndlp = rdata->pnode;
138 vport = ndlp->vport;
139 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
140 "rport terminate: sid:x%x did:x%x flg:x%x",
141 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
142
143 if (ndlp->nlp_sid != NLP_NO_SID)
144 lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
145}
146
147
148
149
150void
151lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
152{
153 struct lpfc_nodelist *ndlp;
154 struct lpfc_vport *vport;
155 struct lpfc_hba *phba;
156 struct lpfc_work_evt *evtp;
157 unsigned long iflags;
158
159 ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
160 if (!ndlp)
161 return;
162
163 vport = ndlp->vport;
164 phba = vport->phba;
165
166 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
167 "rport devlosscb: sid:x%x did:x%x flg:x%x",
168 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
169
170 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
171 "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
172 "load_flag x%x refcnt %d\n",
173 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
174 vport->load_flag, kref_read(&ndlp->kref));
175
176
177
178
179 if (vport->load_flag & FC_UNLOADING) {
180 ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
181 ndlp->rport = NULL;
182
183 ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
184
185
186
187
188 lpfc_nlp_put(ndlp);
189 return;
190 }
191
192 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
193 return;
194
195 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
196 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
197 "6789 rport name %llx != node port name %llx",
198 rport->port_name,
199 wwn_to_u64(ndlp->nlp_portname.u.wwn));
200
201 evtp = &ndlp->dev_loss_evt;
202
203 if (!list_empty(&evtp->evt_listp)) {
204 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
205 "6790 rport name %llx dev_loss_evt pending\n",
206 rport->port_name);
207 return;
208 }
209
210 spin_lock_irqsave(&ndlp->lock, iflags);
211 ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
212
213
214
215
216 if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
217 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
218
219
220
221
222
223 ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
224 ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
225 ndlp->rport = NULL;
226 spin_unlock_irqrestore(&ndlp->lock, iflags);
227
228
229
230
231 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
232
233 spin_lock_irqsave(&phba->hbalock, iflags);
234 if (evtp->evt_arg1) {
235 evtp->evt = LPFC_EVT_DEV_LOSS;
236 list_add_tail(&evtp->evt_listp, &phba->work_list);
237 lpfc_worker_wake_up(phba);
238 }
239 spin_unlock_irqrestore(&phba->hbalock, iflags);
240
241 return;
242}
243
244
245
246
247
248
249
250
251static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
252{
253 u16 keep;
254 u32 difftime = 0, r, bucket;
255 u64 *lta;
256 int cpu;
257 struct lpfc_vmid *vmp;
258
259 write_lock(&vport->vmid_lock);
260
261 if (!vport->cur_vmid_cnt)
262 goto out;
263
264
265 hash_for_each(vport->hash_table, bucket, vmp, hnode) {
266 keep = 0;
267 if (vmp->flag & LPFC_VMID_REGISTERED) {
268
269
270 for_each_possible_cpu(cpu) {
271
272 lta = per_cpu_ptr(vmp->last_io_time, cpu);
273 if (!lta)
274 continue;
275 difftime = (jiffies) - (*lta);
276 if ((vport->vmid_inactivity_timeout *
277 JIFFIES_PER_HR) > difftime) {
278 keep = 1;
279 break;
280 }
281 }
282
283
284
285 if (!keep) {
286
287 vmp->flag = LPFC_VMID_DE_REGISTER;
288 write_unlock(&vport->vmid_lock);
289 if (vport->vmid_priority_tagging)
290 r = lpfc_vmid_uvem(vport, vmp, false);
291 else
292 r = lpfc_vmid_cmd(vport,
293 SLI_CTAS_DAPP_IDENT,
294 vmp);
295
296
297
298 write_lock(&vport->vmid_lock);
299 if (!r) {
300 struct lpfc_vmid *ht = vmp;
301
302 vport->cur_vmid_cnt--;
303 ht->flag = LPFC_VMID_SLOT_FREE;
304 free_percpu(ht->last_io_time);
305 ht->last_io_time = NULL;
306 hash_del(&ht->hnode);
307 }
308 }
309 }
310 }
311 out:
312 write_unlock(&vport->vmid_lock);
313}
314
315
316
317
318
319
320
321
322
323
324
325static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
326{
327 struct lpfc_vport *vport;
328 struct lpfc_vport **vports;
329 int i;
330
331 vports = lpfc_create_vport_work_array(phba);
332 if (!vports)
333 return;
334
335 for (i = 0; i <= phba->max_vports; i++) {
336 if ((!vports[i]) && (i == 0))
337 vport = phba->pport;
338 else
339 vport = vports[i];
340 if (!vport)
341 break;
342
343 lpfc_check_inactive_vmid_one(vport);
344 }
345 lpfc_destroy_vport_work_array(phba, vports);
346}
347
348
349
350
351
352
353
354
355
356
357void
358lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
359 struct lpfc_nodelist *ndlp)
360{
361 unsigned long iflags;
362
363 spin_lock_irqsave(&ndlp->lock, iflags);
364 if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
365 ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
366 spin_unlock_irqrestore(&ndlp->lock, iflags);
367 lpfc_nlp_get(ndlp);
368 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
369 "8438 Devloss timeout reversed on DID x%x "
370 "refcnt %d ndlp %p flag x%x "
371 "port_state = x%x\n",
372 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
373 ndlp->nlp_flag, vport->port_state);
374 spin_lock_irqsave(&ndlp->lock, iflags);
375 }
376 spin_unlock_irqrestore(&ndlp->lock, iflags);
377}
378
379
380
381
382
383
384
385
386
387
388
389static int
390lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
391{
392 struct lpfc_vport *vport;
393 struct lpfc_hba *phba;
394 uint8_t *name;
395 int warn_on = 0;
396 int fcf_inuse = 0;
397 bool recovering = false;
398 struct fc_vport *fc_vport = NULL;
399 unsigned long iflags;
400
401 vport = ndlp->vport;
402 name = (uint8_t *)&ndlp->nlp_portname;
403 phba = vport->phba;
404
405 spin_lock_irqsave(&ndlp->lock, iflags);
406 ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
407 spin_unlock_irqrestore(&ndlp->lock, iflags);
408
409 if (phba->sli_rev == LPFC_SLI_REV4)
410 fcf_inuse = lpfc_fcf_inuse(phba);
411
412 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
413 "rport devlosstmo:did:x%x type:x%x id:x%x",
414 ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);
415
416 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
417 "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
418 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
419 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));
420
421
422 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
423 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
424 "0284 Devloss timeout Ignored on "
425 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
426 "NPort x%x\n",
427 *name, *(name+1), *(name+2), *(name+3),
428 *(name+4), *(name+5), *(name+6), *(name+7),
429 ndlp->nlp_DID);
430 return fcf_inuse;
431 }
432
433
434 if (ndlp->nlp_type & NLP_FABRIC) {
435 spin_lock_irqsave(&ndlp->lock, iflags);
436
437
438
439
440 switch (ndlp->nlp_DID) {
441 case Fabric_DID:
442 fc_vport = vport->fc_vport;
443 if (fc_vport &&
444 fc_vport->vport_state == FC_VPORT_INITIALIZING)
445 recovering = true;
446 break;
447 case Fabric_Cntl_DID:
448 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
449 recovering = true;
450 break;
451 case FDMI_DID:
452 fallthrough;
453 case NameServer_DID:
454 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
455 ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
456 recovering = true;
457 break;
458 }
459 spin_unlock_irqrestore(&ndlp->lock, iflags);
460
461
462
463
464
465 if (recovering) {
466 lpfc_printf_vlog(vport, KERN_INFO,
467 LOG_DISCOVERY | LOG_NODE,
468 "8436 Devloss timeout marked on "
469 "DID x%x refcnt %d ndlp %p "
470 "flag x%x port_state = x%x\n",
471 ndlp->nlp_DID, kref_read(&ndlp->kref),
472 ndlp, ndlp->nlp_flag,
473 vport->port_state);
474 spin_lock_irqsave(&ndlp->lock, iflags);
475 ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
476 spin_unlock_irqrestore(&ndlp->lock, iflags);
477 } else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
478
479
480
481
482 lpfc_printf_vlog(vport, KERN_INFO,
483 LOG_DISCOVERY | LOG_NODE,
484 "8437 Devloss timeout ignored on "
485 "DID x%x refcnt %d ndlp %p "
486 "flag x%x port_state = x%x\n",
487 ndlp->nlp_DID, kref_read(&ndlp->kref),
488 ndlp, ndlp->nlp_flag,
489 vport->port_state);
490 return fcf_inuse;
491 }
492
493 lpfc_nlp_put(ndlp);
494 return fcf_inuse;
495 }
496
497 if (ndlp->nlp_sid != NLP_NO_SID) {
498 warn_on = 1;
499 lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
500 }
501
502 if (warn_on) {
503 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
504 "0203 Devloss timeout on "
505 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
506 "NPort x%06x Data: x%x x%x x%x\n",
507 *name, *(name+1), *(name+2), *(name+3),
508 *(name+4), *(name+5), *(name+6), *(name+7),
509 ndlp->nlp_DID, ndlp->nlp_flag,
510 ndlp->nlp_state, ndlp->nlp_rpi);
511 } else {
512 lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
513 "0204 Devloss timeout on "
514 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
515 "NPort x%06x Data: x%x x%x x%x\n",
516 *name, *(name+1), *(name+2), *(name+3),
517 *(name+4), *(name+5), *(name+6), *(name+7),
518 ndlp->nlp_DID, ndlp->nlp_flag,
519 ndlp->nlp_state, ndlp->nlp_rpi);
520 }
521
522
523
524
525 if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
526 ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
527 return fcf_inuse;
528 }
529
530 if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
531 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
532
533 return fcf_inuse;
534}
535
536static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
537{
538 struct lpfc_vport *vport;
539 struct lpfc_vport **vports;
540 int i;
541
542 vports = lpfc_create_vport_work_array(phba);
543 if (!vports)
544 return;
545
546 for (i = 0; i <= phba->max_vports; i++) {
547 if ((!vports[i]) && (i == 0))
548 vport = phba->pport;
549 else
550 vport = vports[i];
551 if (!vport)
552 break;
553
554 if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
555 if (!lpfc_issue_els_qfpa(vport))
556 vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
557 }
558 }
559 lpfc_destroy_vport_work_array(phba, vports);
560}
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580static void
581lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
582 uint32_t nlp_did)
583{
584
585
586
587 if (!fcf_inuse)
588 return;
589
590 if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
591 spin_lock_irq(&phba->hbalock);
592 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
593 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
594 spin_unlock_irq(&phba->hbalock);
595 return;
596 }
597 phba->hba_flag |= HBA_DEVLOSS_TMO;
598 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
599 "2847 Last remote node (x%x) using "
600 "FCF devloss tmo\n", nlp_did);
601 }
602 if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
603 spin_unlock_irq(&phba->hbalock);
604 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
605 "2868 Devloss tmo to FCF rediscovery "
606 "in progress\n");
607 return;
608 }
609 if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
610 spin_unlock_irq(&phba->hbalock);
611 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
612 "2869 Devloss tmo to idle FIP engine, "
613 "unreg in-use FCF and rescan.\n");
614
615 lpfc_unregister_fcf_rescan(phba);
616 return;
617 }
618 spin_unlock_irq(&phba->hbalock);
619 if (phba->hba_flag & FCF_TS_INPROG)
620 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
621 "2870 FCF table scan in progress\n");
622 if (phba->hba_flag & FCF_RR_INPROG)
623 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
624 "2871 FLOGI roundrobin FCF failover "
625 "in progress\n");
626 }
627 lpfc_unregister_unused_fcf(phba);
628}
629
630
631
632
633
634
635
636
637
638
639
640struct lpfc_fast_path_event *
641lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
642 struct lpfc_fast_path_event *ret;
643
644
645 if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
646 return NULL;
647
648 ret = kzalloc(sizeof(struct lpfc_fast_path_event),
649 GFP_ATOMIC);
650 if (ret) {
651 atomic_inc(&phba->fast_event_count);
652 INIT_LIST_HEAD(&ret->work_evt.evt_listp);
653 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
654 }
655 return ret;
656}
657
658
659
660
661
662
663
664
665
666void
667lpfc_free_fast_evt(struct lpfc_hba *phba,
668 struct lpfc_fast_path_event *evt) {
669
670 atomic_dec(&phba->fast_event_count);
671 kfree(evt);
672}
673
674
675
676
677
678
679
680
681
682
683static void
684lpfc_send_fastpath_evt(struct lpfc_hba *phba,
685 struct lpfc_work_evt *evtp)
686{
687 unsigned long evt_category, evt_sub_category;
688 struct lpfc_fast_path_event *fast_evt_data;
689 char *evt_data;
690 uint32_t evt_data_size;
691 struct Scsi_Host *shost;
692
693 fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
694 work_evt);
695
696 evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
697 evt_sub_category = (unsigned long) fast_evt_data->un.
698 fabric_evt.subcategory;
699 shost = lpfc_shost_from_vport(fast_evt_data->vport);
700 if (evt_category == FC_REG_FABRIC_EVENT) {
701 if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
702 evt_data = (char *) &fast_evt_data->un.read_check_error;
703 evt_data_size = sizeof(fast_evt_data->un.
704 read_check_error);
705 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
706 (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
707 evt_data = (char *) &fast_evt_data->un.fabric_evt;
708 evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
709 } else {
710 lpfc_free_fast_evt(phba, fast_evt_data);
711 return;
712 }
713 } else if (evt_category == FC_REG_SCSI_EVENT) {
714 switch (evt_sub_category) {
715 case LPFC_EVENT_QFULL:
716 case LPFC_EVENT_DEVBSY:
717 evt_data = (char *) &fast_evt_data->un.scsi_evt;
718 evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
719 break;
720 case LPFC_EVENT_CHECK_COND:
721 evt_data = (char *) &fast_evt_data->un.check_cond_evt;
722 evt_data_size = sizeof(fast_evt_data->un.
723 check_cond_evt);
724 break;
725 case LPFC_EVENT_VARQUEDEPTH:
726 evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
727 evt_data_size = sizeof(fast_evt_data->un.
728 queue_depth_evt);
729 break;
730 default:
731 lpfc_free_fast_evt(phba, fast_evt_data);
732 return;
733 }
734 } else {
735 lpfc_free_fast_evt(phba, fast_evt_data);
736 return;
737 }
738
739 if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
740 fc_host_post_vendor_event(shost,
741 fc_get_event_number(),
742 evt_data_size,
743 evt_data,
744 LPFC_NL_VENDOR_ID);
745
746 lpfc_free_fast_evt(phba, fast_evt_data);
747 return;
748}
749
750static void
751lpfc_work_list_done(struct lpfc_hba *phba)
752{
753 struct lpfc_work_evt *evtp = NULL;
754 struct lpfc_nodelist *ndlp;
755 int free_evt;
756 int fcf_inuse;
757 uint32_t nlp_did;
758
759 spin_lock_irq(&phba->hbalock);
760 while (!list_empty(&phba->work_list)) {
761 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
762 evt_listp);
763 spin_unlock_irq(&phba->hbalock);
764 free_evt = 1;
765 switch (evtp->evt) {
766 case LPFC_EVT_ELS_RETRY:
767 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
768 lpfc_els_retry_delay_handler(ndlp);
769 free_evt = 0;
770
771
772
773 lpfc_nlp_put(ndlp);
774 break;
775 case LPFC_EVT_DEV_LOSS:
776 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
777 fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
778 free_evt = 0;
779
780
781
782 nlp_did = ndlp->nlp_DID;
783 lpfc_nlp_put(ndlp);
784 if (phba->sli_rev == LPFC_SLI_REV4)
785 lpfc_sli4_post_dev_loss_tmo_handler(phba,
786 fcf_inuse,
787 nlp_did);
788 break;
789 case LPFC_EVT_RECOVER_PORT:
790 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
791 lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
792 free_evt = 0;
793
794
795
796 lpfc_nlp_put(ndlp);
797 break;
798 case LPFC_EVT_ONLINE:
799 if (phba->link_state < LPFC_LINK_DOWN)
800 *(int *) (evtp->evt_arg1) = lpfc_online(phba);
801 else
802 *(int *) (evtp->evt_arg1) = 0;
803 complete((struct completion *)(evtp->evt_arg2));
804 break;
805 case LPFC_EVT_OFFLINE_PREP:
806 if (phba->link_state >= LPFC_LINK_DOWN)
807 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
808 *(int *)(evtp->evt_arg1) = 0;
809 complete((struct completion *)(evtp->evt_arg2));
810 break;
811 case LPFC_EVT_OFFLINE:
812 lpfc_offline(phba);
813 lpfc_sli_brdrestart(phba);
814 *(int *)(evtp->evt_arg1) =
815 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
816 lpfc_unblock_mgmt_io(phba);
817 complete((struct completion *)(evtp->evt_arg2));
818 break;
819 case LPFC_EVT_WARM_START:
820 lpfc_offline(phba);
821 lpfc_reset_barrier(phba);
822 lpfc_sli_brdreset(phba);
823 lpfc_hba_down_post(phba);
824 *(int *)(evtp->evt_arg1) =
825 lpfc_sli_brdready(phba, HS_MBRDY);
826 lpfc_unblock_mgmt_io(phba);
827 complete((struct completion *)(evtp->evt_arg2));
828 break;
829 case LPFC_EVT_KILL:
830 lpfc_offline(phba);
831 *(int *)(evtp->evt_arg1)
832 = (phba->pport->stopped)
833 ? 0 : lpfc_sli_brdkill(phba);
834 lpfc_unblock_mgmt_io(phba);
835 complete((struct completion *)(evtp->evt_arg2));
836 break;
837 case LPFC_EVT_FASTPATH_MGMT_EVT:
838 lpfc_send_fastpath_evt(phba, evtp);
839 free_evt = 0;
840 break;
841 case LPFC_EVT_RESET_HBA:
842 if (!(phba->pport->load_flag & FC_UNLOADING))
843 lpfc_reset_hba(phba);
844 break;
845 }
846 if (free_evt)
847 kfree(evtp);
848 spin_lock_irq(&phba->hbalock);
849 }
850 spin_unlock_irq(&phba->hbalock);
851
852}
853
854static void
855lpfc_work_done(struct lpfc_hba *phba)
856{
857 struct lpfc_sli_ring *pring;
858 uint32_t ha_copy, status, control, work_port_events;
859 struct lpfc_vport **vports;
860 struct lpfc_vport *vport;
861 int i;
862
863 spin_lock_irq(&phba->hbalock);
864 ha_copy = phba->work_ha;
865 phba->work_ha = 0;
866 spin_unlock_irq(&phba->hbalock);
867
868
869 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
870 lpfc_sli4_post_async_mbox(phba);
871
872 if (ha_copy & HA_ERATT)
873
874 lpfc_handle_eratt(phba);
875
876 if (ha_copy & HA_MBATT)
877 lpfc_sli_handle_mb_event(phba);
878
879 if (ha_copy & HA_LATT)
880 lpfc_handle_latt(phba);
881
882
883 if (lpfc_is_vmid_enabled(phba)) {
884 if (phba->pport->work_port_events &
885 WORKER_CHECK_VMID_ISSUE_QFPA) {
886 lpfc_check_vmid_qfpa_issue(phba);
887 phba->pport->work_port_events &=
888 ~WORKER_CHECK_VMID_ISSUE_QFPA;
889 }
890 if (phba->pport->work_port_events &
891 WORKER_CHECK_INACTIVE_VMID) {
892 lpfc_check_inactive_vmid(phba);
893 phba->pport->work_port_events &=
894 ~WORKER_CHECK_INACTIVE_VMID;
895 }
896 }
897
898
899 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
900 if (phba->hba_flag & HBA_RRQ_ACTIVE)
901 lpfc_handle_rrq_active(phba);
902 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
903 lpfc_sli4_els_xri_abort_event_proc(phba);
904 if (phba->hba_flag & ASYNC_EVENT)
905 lpfc_sli4_async_event_proc(phba);
906 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
907 spin_lock_irq(&phba->hbalock);
908 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
909 spin_unlock_irq(&phba->hbalock);
910 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
911 }
912 if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
913 lpfc_sli4_fcf_redisc_event_proc(phba);
914 }
915
916 vports = lpfc_create_vport_work_array(phba);
917 if (vports != NULL)
918 for (i = 0; i <= phba->max_vports; i++) {
919
920
921
922
923 if (vports[i] == NULL && i == 0)
924 vport = phba->pport;
925 else
926 vport = vports[i];
927 if (vport == NULL)
928 break;
929 spin_lock_irq(&vport->work_port_lock);
930 work_port_events = vport->work_port_events;
931 vport->work_port_events &= ~work_port_events;
932 spin_unlock_irq(&vport->work_port_lock);
933 if (work_port_events & WORKER_DISC_TMO)
934 lpfc_disc_timeout_handler(vport);
935 if (work_port_events & WORKER_ELS_TMO)
936 lpfc_els_timeout_handler(vport);
937 if (work_port_events & WORKER_HB_TMO)
938 lpfc_hb_timeout_handler(phba);
939 if (work_port_events & WORKER_MBOX_TMO)
940 lpfc_mbox_timeout_handler(phba);
941 if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
942 lpfc_unblock_fabric_iocbs(phba);
943 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
944 lpfc_ramp_down_queue_handler(phba);
945 if (work_port_events & WORKER_DELAYED_DISC_TMO)
946 lpfc_delayed_disc_timeout_handler(vport);
947 }
948 lpfc_destroy_vport_work_array(phba, vports);
949
950 pring = lpfc_phba_elsring(phba);
951 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
952 status >>= (4*LPFC_ELS_RING);
953 if (pring && (status & HA_RXMASK ||
954 pring->flag & LPFC_DEFERRED_RING_EVENT ||
955 phba->hba_flag & HBA_SP_QUEUE_EVT)) {
956 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
957 pring->flag |= LPFC_DEFERRED_RING_EVENT;
958
959 if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
960 set_bit(LPFC_DATA_READY, &phba->data_flags);
961 } else {
962
963
964
965 if (phba->link_state >= LPFC_LINK_DOWN ||
966 phba->link_flag & LS_MDS_LOOPBACK) {
967 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
968 lpfc_sli_handle_slow_ring_event(phba, pring,
969 (status &
970 HA_RXMASK));
971 }
972 }
973 if (phba->sli_rev == LPFC_SLI_REV4)
974 lpfc_drain_txq(phba);
975
976
977
978 if (phba->sli_rev <= LPFC_SLI_REV3) {
979 spin_lock_irq(&phba->hbalock);
980 control = readl(phba->HCregaddr);
981 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
982 lpfc_debugfs_slow_ring_trc(phba,
983 "WRK Enable ring: cntl:x%x hacopy:x%x",
984 control, ha_copy, 0);
985
986 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
987 writel(control, phba->HCregaddr);
988 readl(phba->HCregaddr);
989 } else {
990 lpfc_debugfs_slow_ring_trc(phba,
991 "WRK Ring ok: cntl:x%x hacopy:x%x",
992 control, ha_copy, 0);
993 }
994 spin_unlock_irq(&phba->hbalock);
995 }
996 }
997 lpfc_work_list_done(phba);
998}
999
1000int
1001lpfc_do_work(void *p)
1002{
1003 struct lpfc_hba *phba = p;
1004 int rc;
1005
1006 set_user_nice(current, MIN_NICE);
1007 current->flags |= PF_NOFREEZE;
1008 phba->data_flags = 0;
1009
1010 while (!kthread_should_stop()) {
1011
1012 rc = wait_event_interruptible(phba->work_waitq,
1013 (test_and_clear_bit(LPFC_DATA_READY,
1014 &phba->data_flags)
1015 || kthread_should_stop()));
1016
1017 if (rc) {
1018 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1019 "0433 Wakeup on signal: rc=x%x\n", rc);
1020 break;
1021 }
1022
1023
1024 lpfc_work_done(phba);
1025 }
1026 phba->worker_thread = NULL;
1027 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1028 "0432 Worker thread stopped.\n");
1029 return 0;
1030}
1031
1032
1033
1034
1035
1036
1037int
1038lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
1039 uint32_t evt)
1040{
1041 struct lpfc_work_evt *evtp;
1042 unsigned long flags;
1043
1044
1045
1046
1047
1048 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
1049 if (!evtp)
1050 return 0;
1051
1052 evtp->evt_arg1 = arg1;
1053 evtp->evt_arg2 = arg2;
1054 evtp->evt = evt;
1055
1056 spin_lock_irqsave(&phba->hbalock, flags);
1057 list_add_tail(&evtp->evt_listp, &phba->work_list);
1058 spin_unlock_irqrestore(&phba->hbalock, flags);
1059
1060 lpfc_worker_wake_up(phba);
1061
1062 return 1;
1063}
1064
1065void
1066lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
1067{
1068 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1069 struct lpfc_hba *phba = vport->phba;
1070 struct lpfc_nodelist *ndlp, *next_ndlp;
1071
1072 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1073 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
1074
1075
1076
1077
1078
1079 if (ndlp->nlp_DID == Fabric_DID) {
1080 if (ndlp->nlp_prev_state ==
1081 NLP_STE_UNUSED_NODE &&
1082 !ndlp->fc4_xpt_flags)
1083 lpfc_nlp_put(ndlp);
1084 }
1085 continue;
1086 }
1087
1088 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
1089 ((vport->port_type == LPFC_NPIV_PORT) &&
1090 ((ndlp->nlp_DID == NameServer_DID) ||
1091 (ndlp->nlp_DID == FDMI_DID) ||
1092 (ndlp->nlp_DID == Fabric_Cntl_DID))))
1093 lpfc_unreg_rpi(vport, ndlp);
1094
1095
1096 if ((phba->sli_rev < LPFC_SLI_REV4) &&
1097 (!remove && ndlp->nlp_type & NLP_FABRIC))
1098 continue;
1099
1100
1101 if (phba->nvmet_support &&
1102 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
1103 lpfc_nvmet_invalidate_host(phba, ndlp);
1104
1105 lpfc_disc_state_machine(vport, ndlp, NULL,
1106 remove
1107 ? NLP_EVT_DEVICE_RM
1108 : NLP_EVT_DEVICE_RECOVERY);
1109 }
1110 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
1111 if (phba->sli_rev == LPFC_SLI_REV4)
1112 lpfc_sli4_unreg_all_rpis(vport);
1113 lpfc_mbx_unreg_vpi(vport);
1114 spin_lock_irq(shost->host_lock);
1115 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1116 spin_unlock_irq(shost->host_lock);
1117 }
1118}
1119
1120void
1121lpfc_port_link_failure(struct lpfc_vport *vport)
1122{
1123 lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
1124
1125
1126 lpfc_cleanup_rcv_buffers(vport);
1127
1128
1129 lpfc_els_flush_rscn(vport);
1130
1131
1132 lpfc_els_flush_cmd(vport);
1133
1134 lpfc_cleanup_rpis(vport, 0);
1135
1136
1137 lpfc_can_disctmo(vport);
1138}
1139
1140void
1141lpfc_linkdown_port(struct lpfc_vport *vport)
1142{
1143 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1144
1145 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
1146 fc_host_post_event(shost, fc_get_event_number(),
1147 FCH_EVT_LINKDOWN, 0);
1148
1149 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1150 "Link Down: state:x%x rtry:x%x flg:x%x",
1151 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
1152
1153 lpfc_port_link_failure(vport);
1154
1155
1156 spin_lock_irq(shost->host_lock);
1157 vport->fc_flag &= ~FC_DISC_DELAYED;
1158 spin_unlock_irq(shost->host_lock);
1159 del_timer_sync(&vport->delayed_disc_tmo);
1160}
1161
1162int
1163lpfc_linkdown(struct lpfc_hba *phba)
1164{
1165 struct lpfc_vport *vport = phba->pport;
1166 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1167 struct lpfc_vport **vports;
1168 LPFC_MBOXQ_t *mb;
1169 int i;
1170
1171 if (phba->link_state == LPFC_LINK_DOWN)
1172 return 0;
1173
1174
1175 lpfc_scsi_dev_block(phba);
1176
1177 phba->defer_flogi_acc_flag = false;
1178
1179 spin_lock_irq(&phba->hbalock);
1180 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
1181 spin_unlock_irq(&phba->hbalock);
1182 if (phba->link_state > LPFC_LINK_DOWN) {
1183 phba->link_state = LPFC_LINK_DOWN;
1184 if (phba->sli4_hba.conf_trunk) {
1185 phba->trunk_link.link0.state = 0;
1186 phba->trunk_link.link1.state = 0;
1187 phba->trunk_link.link2.state = 0;
1188 phba->trunk_link.link3.state = 0;
1189 phba->sli4_hba.link_state.logical_speed =
1190 LPFC_LINK_SPEED_UNKNOWN;
1191 }
1192 spin_lock_irq(shost->host_lock);
1193 phba->pport->fc_flag &= ~FC_LBIT;
1194 spin_unlock_irq(shost->host_lock);
1195 }
1196 vports = lpfc_create_vport_work_array(phba);
1197 if (vports != NULL) {
1198 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1199
1200 lpfc_linkdown_port(vports[i]);
1201
1202 vports[i]->fc_myDID = 0;
1203
1204 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
1205 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
1206 if (phba->nvmet_support)
1207 lpfc_nvmet_update_targetport(phba);
1208 else
1209 lpfc_nvme_update_localport(vports[i]);
1210 }
1211 }
1212 }
1213 lpfc_destroy_vport_work_array(phba, vports);
1214
1215
1216 if (phba->sli_rev > LPFC_SLI_REV3)
1217 goto skip_unreg_did;
1218
1219 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1220 if (mb) {
1221 lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
1222 mb->vport = vport;
1223 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1224 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
1225 == MBX_NOT_FINISHED) {
1226 mempool_free(mb, phba->mbox_mem_pool);
1227 }
1228 }
1229
1230 skip_unreg_did:
1231
1232 if (phba->pport->fc_flag & FC_PT2PT) {
1233 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1234 if (mb) {
1235 lpfc_config_link(phba, mb);
1236 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1237 mb->vport = vport;
1238 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
1239 == MBX_NOT_FINISHED) {
1240 mempool_free(mb, phba->mbox_mem_pool);
1241 }
1242 }
1243 spin_lock_irq(shost->host_lock);
1244 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
1245 phba->pport->rcv_flogi_cnt = 0;
1246 spin_unlock_irq(shost->host_lock);
1247 }
1248 return 0;
1249}
1250
1251static void
1252lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
1253{
1254 struct lpfc_nodelist *ndlp;
1255
1256 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1257 ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
1258
1259 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1260 continue;
1261 if (ndlp->nlp_type & NLP_FABRIC) {
1262
1263
1264
1265 if (ndlp->nlp_DID != Fabric_DID)
1266 lpfc_unreg_rpi(vport, ndlp);
1267 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1268 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
1269
1270
1271
1272 lpfc_unreg_rpi(vport, ndlp);
1273 }
1274 }
1275}
1276
1277static void
1278lpfc_linkup_port(struct lpfc_vport *vport)
1279{
1280 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1281 struct lpfc_hba *phba = vport->phba;
1282
1283 if ((vport->load_flag & FC_UNLOADING) != 0)
1284 return;
1285
1286 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1287 "Link Up: top:x%x speed:x%x flg:x%x",
1288 phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
1289
1290
1291 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1292 (vport != phba->pport))
1293 return;
1294
1295 if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
1296 fc_host_post_event(shost, fc_get_event_number(),
1297 FCH_EVT_LINKUP, 0);
1298
1299 spin_lock_irq(shost->host_lock);
1300 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
1301 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
1302 vport->fc_flag |= FC_NDISC_ACTIVE;
1303 vport->fc_ns_retry = 0;
1304 spin_unlock_irq(shost->host_lock);
1305
1306 lpfc_linkup_cleanup_nodes(vport);
1307}
1308
1309static int
1310lpfc_linkup(struct lpfc_hba *phba)
1311{
1312 struct lpfc_vport **vports;
1313 int i;
1314 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
1315
1316 phba->link_state = LPFC_LINK_UP;
1317
1318
1319 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
1320 del_timer_sync(&phba->fabric_block_timer);
1321
1322 vports = lpfc_create_vport_work_array(phba);
1323 if (vports != NULL)
1324 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
1325 lpfc_linkup_port(vports[i]);
1326 lpfc_destroy_vport_work_array(phba, vports);
1327
1328
1329
1330
1331
1332 spin_lock_irq(shost->host_lock);
1333 phba->pport->rcv_flogi_cnt = 0;
1334 spin_unlock_irq(shost->host_lock);
1335
1336
1337 phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
1338 phba->defer_flogi_acc_flag = false;
1339
1340 return 0;
1341}
1342
1343
1344
1345
1346
1347
1348
1349static void
1350lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1351{
1352 struct lpfc_vport *vport = pmb->vport;
1353 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1354 struct lpfc_sli *psli = &phba->sli;
1355 MAILBOX_t *mb = &pmb->u.mb;
1356 uint32_t control;
1357
1358
1359 psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1360 psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1361
1362
1363 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
1364
1365 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1366 "0320 CLEAR_LA mbxStatus error x%x hba "
1367 "state x%x\n",
1368 mb->mbxStatus, vport->port_state);
1369 phba->link_state = LPFC_HBA_ERROR;
1370 goto out;
1371 }
1372
1373 if (vport->port_type == LPFC_PHYSICAL_PORT)
1374 phba->link_state = LPFC_HBA_READY;
1375
1376 spin_lock_irq(&phba->hbalock);
1377 psli->sli_flag |= LPFC_PROCESS_LA;
1378 control = readl(phba->HCregaddr);
1379 control |= HC_LAINT_ENA;
1380 writel(control, phba->HCregaddr);
1381 readl(phba->HCregaddr);
1382 spin_unlock_irq(&phba->hbalock);
1383 mempool_free(pmb, phba->mbox_mem_pool);
1384 return;
1385
1386out:
1387
1388 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1389 "0225 Device Discovery completes\n");
1390 mempool_free(pmb, phba->mbox_mem_pool);
1391
1392 spin_lock_irq(shost->host_lock);
1393 vport->fc_flag &= ~FC_ABORT_DISCOVERY;
1394 spin_unlock_irq(shost->host_lock);
1395
1396 lpfc_can_disctmo(vport);
1397
1398
1399
1400 spin_lock_irq(&phba->hbalock);
1401 psli->sli_flag |= LPFC_PROCESS_LA;
1402 control = readl(phba->HCregaddr);
1403 control |= HC_LAINT_ENA;
1404 writel(control, phba->HCregaddr);
1405 readl(phba->HCregaddr);
1406 spin_unlock_irq(&phba->hbalock);
1407
1408 return;
1409}
1410
1411void
1412lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1413{
1414 struct lpfc_vport *vport = pmb->vport;
1415 LPFC_MBOXQ_t *sparam_mb;
1416 struct lpfc_dmabuf *sparam_mp;
1417 u16 status = pmb->u.mb.mbxStatus;
1418 int rc;
1419
1420 mempool_free(pmb, phba->mbox_mem_pool);
1421
1422 if (status)
1423 goto out;
1424
1425
1426 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1427 !(phba->hba_flag & HBA_FCOE_MODE) &&
1428 (phba->link_flag & LS_LOOPBACK_MODE))
1429 return;
1430
1431 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
1432 vport->fc_flag & FC_PUBLIC_LOOP &&
1433 !(vport->fc_flag & FC_LBIT)) {
1434
1435
1436
1437
1438 lpfc_set_disctmo(vport);
1439 return;
1440 }
1441
1442
1443
1444
1445 if (vport->port_state != LPFC_FLOGI) {
1446
1447
1448
1449 if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
1450 !(phba->link_flag & LS_LOOPBACK_MODE)) {
1451 sparam_mb = mempool_alloc(phba->mbox_mem_pool,
1452 GFP_KERNEL);
1453 if (!sparam_mb)
1454 goto sparam_out;
1455
1456 rc = lpfc_read_sparam(phba, sparam_mb, 0);
1457 if (rc) {
1458 mempool_free(sparam_mb, phba->mbox_mem_pool);
1459 goto sparam_out;
1460 }
1461 sparam_mb->vport = vport;
1462 sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
1463 rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
1464 if (rc == MBX_NOT_FINISHED) {
1465 sparam_mp = (struct lpfc_dmabuf *)
1466 sparam_mb->ctx_buf;
1467 lpfc_mbuf_free(phba, sparam_mp->virt,
1468 sparam_mp->phys);
1469 kfree(sparam_mp);
1470 sparam_mb->ctx_buf = NULL;
1471 mempool_free(sparam_mb, phba->mbox_mem_pool);
1472 goto sparam_out;
1473 }
1474
1475 phba->hba_flag |= HBA_DEFER_FLOGI;
1476 } else {
1477 lpfc_initial_flogi(vport);
1478 }
1479 } else {
1480 if (vport->fc_flag & FC_PT2PT)
1481 lpfc_disc_start(vport);
1482 }
1483 return;
1484
1485out:
1486 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1487 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
1488 status, vport->port_state);
1489
1490sparam_out:
1491 lpfc_linkdown(phba);
1492
1493 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1494 "0200 CONFIG_LINK bad hba state x%x\n",
1495 vport->port_state);
1496
1497 lpfc_issue_clear_la(phba, vport);
1498 return;
1499}
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509void
1510lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
1511{
1512 struct lpfc_fcf_pri *fcf_pri;
1513 struct lpfc_fcf_pri *next_fcf_pri;
1514 memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
1515 spin_lock_irq(&phba->hbalock);
1516 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
1517 &phba->fcf.fcf_pri_list, list) {
1518 list_del_init(&fcf_pri->list);
1519 fcf_pri->fcf_rec.flag = 0;
1520 }
1521 spin_unlock_irq(&phba->hbalock);
1522}
1523static void
1524lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1525{
1526 struct lpfc_vport *vport = mboxq->vport;
1527
1528 if (mboxq->u.mb.mbxStatus) {
1529 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1530 "2017 REG_FCFI mbxStatus error x%x "
1531 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
1532 vport->port_state);
1533 goto fail_out;
1534 }
1535
1536
1537 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
1538
1539 spin_lock_irq(&phba->hbalock);
1540 phba->fcf.fcf_flag |= FCF_REGISTERED;
1541 spin_unlock_irq(&phba->hbalock);
1542
1543
1544 if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
1545 lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
1546 goto fail_out;
1547
1548
1549 spin_lock_irq(&phba->hbalock);
1550 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1551 phba->hba_flag &= ~FCF_TS_INPROG;
1552 if (vport->port_state != LPFC_FLOGI) {
1553 phba->hba_flag |= FCF_RR_INPROG;
1554 spin_unlock_irq(&phba->hbalock);
1555 lpfc_issue_init_vfi(vport);
1556 goto out;
1557 }
1558 spin_unlock_irq(&phba->hbalock);
1559 goto out;
1560
1561fail_out:
1562 spin_lock_irq(&phba->hbalock);
1563 phba->hba_flag &= ~FCF_RR_INPROG;
1564 spin_unlock_irq(&phba->hbalock);
1565out:
1566 mempool_free(mboxq, phba->mbox_mem_pool);
1567}
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578static uint32_t
1579lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
1580{
1581 if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
1582 return 0;
1583 if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
1584 return 0;
1585 if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
1586 return 0;
1587 if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
1588 return 0;
1589 if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
1590 return 0;
1591 if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
1592 return 0;
1593 if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
1594 return 0;
1595 if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
1596 return 0;
1597 return 1;
1598}
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609static uint32_t
1610lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
1611{
1612 if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
1613 return 0;
1614 if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
1615 return 0;
1616 if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
1617 return 0;
1618 if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
1619 return 0;
1620 if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
1621 return 0;
1622 if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
1623 return 0;
1624 if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
1625 return 0;
1626 if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
1627 return 0;
1628 return 1;
1629}
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640static uint32_t
1641lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
1642{
1643 if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
1644 return 0;
1645 if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
1646 return 0;
1647 if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
1648 return 0;
1649 if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
1650 return 0;
1651 if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
1652 return 0;
1653 if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
1654 return 0;
1655 return 1;
1656}
1657
1658static bool
1659lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
1660{
1661 return (curr_vlan_id == new_vlan_id);
1662}
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674static void
1675__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
1676 struct fcf_record *new_fcf_record
1677 )
1678{
1679 struct lpfc_fcf_pri *fcf_pri;
1680
1681 fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1682 fcf_pri->fcf_rec.fcf_index = fcf_index;
1683
1684 fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
1685
1686}
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696static void
1697lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
1698 struct fcf_record *new_fcf_record)
1699{
1700
1701 fcf_rec->fabric_name[0] =
1702 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
1703 fcf_rec->fabric_name[1] =
1704 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
1705 fcf_rec->fabric_name[2] =
1706 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
1707 fcf_rec->fabric_name[3] =
1708 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
1709 fcf_rec->fabric_name[4] =
1710 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
1711 fcf_rec->fabric_name[5] =
1712 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
1713 fcf_rec->fabric_name[6] =
1714 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
1715 fcf_rec->fabric_name[7] =
1716 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
1717
1718 fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
1719 fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
1720 fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
1721 fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
1722 fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
1723 fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
1724
1725 fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
1726
1727 fcf_rec->priority = new_fcf_record->fip_priority;
1728
1729 fcf_rec->switch_name[0] =
1730 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
1731 fcf_rec->switch_name[1] =
1732 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
1733 fcf_rec->switch_name[2] =
1734 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
1735 fcf_rec->switch_name[3] =
1736 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
1737 fcf_rec->switch_name[4] =
1738 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
1739 fcf_rec->switch_name[5] =
1740 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
1741 fcf_rec->switch_name[6] =
1742 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
1743 fcf_rec->switch_name[7] =
1744 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
1745}
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760static void
1761__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
1762 struct fcf_record *new_fcf_record, uint32_t addr_mode,
1763 uint16_t vlan_id, uint32_t flag)
1764{
1765 lockdep_assert_held(&phba->hbalock);
1766
1767
1768 lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
1769
1770 fcf_rec->addr_mode = addr_mode;
1771 fcf_rec->vlan_id = vlan_id;
1772 fcf_rec->flag |= (flag | RECORD_VALID);
1773 __lpfc_update_fcf_record_pri(phba,
1774 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
1775 new_fcf_record);
1776}
1777
1778
1779
1780
1781
1782
1783
1784
1785static void
1786lpfc_register_fcf(struct lpfc_hba *phba)
1787{
1788 LPFC_MBOXQ_t *fcf_mbxq;
1789 int rc;
1790
1791 spin_lock_irq(&phba->hbalock);
1792
1793 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
1794 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1795 spin_unlock_irq(&phba->hbalock);
1796 return;
1797 }
1798
1799
1800 if (phba->fcf.fcf_flag & FCF_REGISTERED) {
1801 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
1802 phba->hba_flag &= ~FCF_TS_INPROG;
1803 if (phba->pport->port_state != LPFC_FLOGI &&
1804 phba->pport->fc_flag & FC_FABRIC) {
1805 phba->hba_flag |= FCF_RR_INPROG;
1806 spin_unlock_irq(&phba->hbalock);
1807 lpfc_initial_flogi(phba->pport);
1808 return;
1809 }
1810 spin_unlock_irq(&phba->hbalock);
1811 return;
1812 }
1813 spin_unlock_irq(&phba->hbalock);
1814
1815 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1816 if (!fcf_mbxq) {
1817 spin_lock_irq(&phba->hbalock);
1818 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1819 spin_unlock_irq(&phba->hbalock);
1820 return;
1821 }
1822
1823 lpfc_reg_fcfi(phba, fcf_mbxq);
1824 fcf_mbxq->vport = phba->pport;
1825 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
1826 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
1827 if (rc == MBX_NOT_FINISHED) {
1828 spin_lock_irq(&phba->hbalock);
1829 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1830 spin_unlock_irq(&phba->hbalock);
1831 mempool_free(fcf_mbxq, phba->mbox_mem_pool);
1832 }
1833
1834 return;
1835}
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855static int
1856lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
1857 struct fcf_record *new_fcf_record,
1858 uint32_t *boot_flag, uint32_t *addr_mode,
1859 uint16_t *vlan_id)
1860{
1861 struct lpfc_fcf_conn_entry *conn_entry;
1862 int i, j, fcf_vlan_id = 0;
1863
1864
1865 for (i = 0; i < 512; i++) {
1866 if (new_fcf_record->vlan_bitmap[i]) {
1867 fcf_vlan_id = i * 8;
1868 j = 0;
1869 while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
1870 j++;
1871 fcf_vlan_id++;
1872 }
1873 break;
1874 }
1875 }
1876
1877
1878 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
1879 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
1880 bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
1881 return 0;
1882
1883 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
1884 *boot_flag = 0;
1885 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1886 new_fcf_record);
1887 if (phba->valid_vlan)
1888 *vlan_id = phba->vlan_id;
1889 else
1890 *vlan_id = LPFC_FCOE_NULL_VID;
1891 return 1;
1892 }
1893
1894
1895
1896
1897
1898 if (list_empty(&phba->fcf_conn_rec_list)) {
1899 *boot_flag = 0;
1900 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1901 new_fcf_record);
1902
1903
1904
1905
1906
1907 if (*addr_mode & LPFC_FCF_FPMA)
1908 *addr_mode = LPFC_FCF_FPMA;
1909
1910
1911 if (fcf_vlan_id)
1912 *vlan_id = fcf_vlan_id;
1913 else
1914 *vlan_id = LPFC_FCOE_NULL_VID;
1915 return 1;
1916 }
1917
1918 list_for_each_entry(conn_entry,
1919 &phba->fcf_conn_rec_list, list) {
1920 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
1921 continue;
1922
1923 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
1924 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
1925 new_fcf_record))
1926 continue;
1927 if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
1928 !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
1929 new_fcf_record))
1930 continue;
1931 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
1932
1933
1934
1935
1936 if (!(new_fcf_record->vlan_bitmap
1937 [conn_entry->conn_rec.vlan_tag / 8] &
1938 (1 << (conn_entry->conn_rec.vlan_tag % 8))))
1939 continue;
1940 }
1941
1942
1943
1944
1945
1946 if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
1947 & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
1948 continue;
1949
1950
1951
1952
1953
1954 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1955 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
1956
1957
1958
1959
1960 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1961 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1962 new_fcf_record) & LPFC_FCF_SPMA))
1963 continue;
1964
1965
1966
1967
1968 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
1969 !(bf_get(lpfc_fcf_record_mac_addr_prov,
1970 new_fcf_record) & LPFC_FCF_FPMA))
1971 continue;
1972 }
1973
1974
1975
1976
1977 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
1978 *boot_flag = 1;
1979 else
1980 *boot_flag = 0;
1981
1982
1983
1984
1985
1986
1987 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
1988 new_fcf_record);
1989
1990
1991
1992
1993 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
1994 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
1995 *addr_mode = (conn_entry->conn_rec.flags &
1996 FCFCNCT_AM_SPMA) ?
1997 LPFC_FCF_SPMA : LPFC_FCF_FPMA;
1998
1999
2000
2001
2002 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
2003 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
2004 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
2005 (*addr_mode & LPFC_FCF_SPMA))
2006 *addr_mode = LPFC_FCF_SPMA;
2007 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
2008 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
2009 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
2010 (*addr_mode & LPFC_FCF_FPMA))
2011 *addr_mode = LPFC_FCF_FPMA;
2012
2013
2014 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
2015 *vlan_id = conn_entry->conn_rec.vlan_tag;
2016
2017
2018
2019
2020 else if (fcf_vlan_id)
2021 *vlan_id = fcf_vlan_id;
2022 else
2023 *vlan_id = LPFC_FCOE_NULL_VID;
2024
2025 return 1;
2026 }
2027
2028 return 0;
2029}
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040int
2041lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
2042{
2043
2044
2045
2046
2047 if ((phba->link_state >= LPFC_LINK_UP) &&
2048 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
2049 return 0;
2050
2051 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2052 "2768 Pending link or FCF event during current "
2053 "handling of the previous event: link_state:x%x, "
2054 "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
2055 phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
2056 phba->fcoe_eventtag);
2057
2058 spin_lock_irq(&phba->hbalock);
2059 phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
2060 spin_unlock_irq(&phba->hbalock);
2061
2062 if (phba->link_state >= LPFC_LINK_UP) {
2063 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
2064 "2780 Restart FCF table scan due to "
2065 "pending FCF event:evt_tag_at_scan:x%x, "
2066 "evt_tag_current:x%x\n",
2067 phba->fcoe_eventtag_at_fcf_scan,
2068 phba->fcoe_eventtag);
2069 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
2070 } else {
2071
2072
2073
2074
2075 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
2076 "2833 Stop FCF discovery process due to link "
2077 "state change (x%x)\n", phba->link_state);
2078 spin_lock_irq(&phba->hbalock);
2079 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
2080 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
2081 spin_unlock_irq(&phba->hbalock);
2082 }
2083
2084
2085 if (unreg_fcf) {
2086 spin_lock_irq(&phba->hbalock);
2087 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
2088 spin_unlock_irq(&phba->hbalock);
2089 lpfc_sli4_unregister_fcf(phba);
2090 }
2091 return 1;
2092}
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109static bool
2110lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
2111{
2112 uint32_t rand_num;
2113
2114
2115 rand_num = 0xFFFF & prandom_u32();
2116
2117
2118 if ((fcf_cnt * rand_num) < 0xFFFF)
2119 return true;
2120 else
2121 return false;
2122}
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137static struct fcf_record *
2138lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
2139 uint16_t *next_fcf_index)
2140{
2141 void *virt_addr;
2142 struct lpfc_mbx_sge sge;
2143 struct lpfc_mbx_read_fcf_tbl *read_fcf;
2144 uint32_t shdr_status, shdr_add_status, if_type;
2145 union lpfc_sli4_cfg_shdr *shdr;
2146 struct fcf_record *new_fcf_record;
2147
2148
2149
2150
2151 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
2152 if (unlikely(!mboxq->sge_array)) {
2153 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2154 "2524 Failed to get the non-embedded SGE "
2155 "virtual address\n");
2156 return NULL;
2157 }
2158 virt_addr = mboxq->sge_array->addr[0];
2159
2160 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
2161 lpfc_sli_pcimem_bcopy(shdr, shdr,
2162 sizeof(union lpfc_sli4_cfg_shdr));
2163 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2164 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2165 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2166 if (shdr_status || shdr_add_status) {
2167 if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
2168 if_type == LPFC_SLI_INTF_IF_TYPE_2)
2169 lpfc_printf_log(phba, KERN_ERR,
2170 LOG_TRACE_EVENT,
2171 "2726 READ_FCF_RECORD Indicates empty "
2172 "FCF table.\n");
2173 else
2174 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2175 "2521 READ_FCF_RECORD mailbox failed "
2176 "with status x%x add_status x%x, "
2177 "mbx\n", shdr_status, shdr_add_status);
2178 return NULL;
2179 }
2180
2181
2182 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
2183 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
2184 sizeof(struct lpfc_mbx_read_fcf_tbl));
2185 *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
2186 new_fcf_record = (struct fcf_record *)(virt_addr +
2187 sizeof(struct lpfc_mbx_read_fcf_tbl));
2188 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
2189 offsetof(struct fcf_record, vlan_bitmap));
2190 new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
2191 new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
2192
2193 return new_fcf_record;
2194}
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206static void
2207lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
2208 struct fcf_record *fcf_record,
2209 uint16_t vlan_id,
2210 uint16_t next_fcf_index)
2211{
2212 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2213 "2764 READ_FCF_RECORD:\n"
2214 "\tFCF_Index : x%x\n"
2215 "\tFCF_Avail : x%x\n"
2216 "\tFCF_Valid : x%x\n"
2217 "\tFCF_SOL : x%x\n"
2218 "\tFIP_Priority : x%x\n"
2219 "\tMAC_Provider : x%x\n"
2220 "\tLowest VLANID : x%x\n"
2221 "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
2222 "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
2223 "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
2224 "\tNext_FCF_Index: x%x\n",
2225 bf_get(lpfc_fcf_record_fcf_index, fcf_record),
2226 bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
2227 bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
2228 bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
2229 fcf_record->fip_priority,
2230 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
2231 vlan_id,
2232 bf_get(lpfc_fcf_record_mac_0, fcf_record),
2233 bf_get(lpfc_fcf_record_mac_1, fcf_record),
2234 bf_get(lpfc_fcf_record_mac_2, fcf_record),
2235 bf_get(lpfc_fcf_record_mac_3, fcf_record),
2236 bf_get(lpfc_fcf_record_mac_4, fcf_record),
2237 bf_get(lpfc_fcf_record_mac_5, fcf_record),
2238 bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
2239 bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
2240 bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
2241 bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
2242 bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
2243 bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
2244 bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
2245 bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
2246 bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
2247 bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
2248 bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
2249 bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
2250 bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
2251 bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
2252 bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
2253 bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
2254 next_fcf_index);
2255}
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270static bool
2271lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
2272 struct lpfc_fcf_rec *fcf_rec,
2273 struct fcf_record *new_fcf_record,
2274 uint16_t new_vlan_id)
2275{
2276 if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
2277 if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
2278 return false;
2279 if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
2280 return false;
2281 if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
2282 return false;
2283 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
2284 return false;
2285 if (fcf_rec->priority != new_fcf_record->fip_priority)
2286 return false;
2287 return true;
2288}
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
2302{
2303 struct lpfc_hba *phba = vport->phba;
2304 int rc;
2305
2306 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
2307 spin_lock_irq(&phba->hbalock);
2308 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2309 spin_unlock_irq(&phba->hbalock);
2310 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2311 "2872 Devloss tmo with no eligible "
2312 "FCF, unregister in-use FCF (x%x) "
2313 "and rescan FCF table\n",
2314 phba->fcf.current_rec.fcf_indx);
2315 lpfc_unregister_fcf_rescan(phba);
2316 goto stop_flogi_current_fcf;
2317 }
2318
2319 phba->hba_flag &= ~FCF_RR_INPROG;
2320
2321 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
2322 spin_unlock_irq(&phba->hbalock);
2323 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2324 "2865 No FCF available, stop roundrobin FCF "
2325 "failover and change port state:x%x/x%x\n",
2326 phba->pport->port_state, LPFC_VPORT_UNKNOWN);
2327 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2328
2329 if (!phba->fcf.fcf_redisc_attempted) {
2330 lpfc_unregister_fcf(phba);
2331
2332 rc = lpfc_sli4_redisc_fcf_table(phba);
2333 if (!rc) {
2334 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2335 "3195 Rediscover FCF table\n");
2336 phba->fcf.fcf_redisc_attempted = 1;
2337 lpfc_sli4_clear_fcf_rr_bmask(phba);
2338 } else {
2339 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2340 "3196 Rediscover FCF table "
2341 "failed. Status:x%x\n", rc);
2342 }
2343 } else {
2344 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2345 "3197 Already rediscover FCF table "
2346 "attempted. No more retry\n");
2347 }
2348 goto stop_flogi_current_fcf;
2349 } else {
2350 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
2351 "2794 Try FLOGI roundrobin FCF failover to "
2352 "(x%x)\n", fcf_index);
2353 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
2354 if (rc)
2355 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
2356 "2761 FLOGI roundrobin FCF failover "
2357 "failed (rc:x%x) to read FCF (x%x)\n",
2358 rc, phba->fcf.current_rec.fcf_indx);
2359 else
2360 goto stop_flogi_current_fcf;
2361 }
2362 return 0;
2363
2364stop_flogi_current_fcf:
2365 lpfc_can_disctmo(vport);
2366 return 1;
2367}
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
2379 uint16_t fcf_index)
2380{
2381 struct lpfc_fcf_pri *new_fcf_pri;
2382
2383 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2384 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2385 "3058 deleting idx x%x pri x%x flg x%x\n",
2386 fcf_index, new_fcf_pri->fcf_rec.priority,
2387 new_fcf_pri->fcf_rec.flag);
2388 spin_lock_irq(&phba->hbalock);
2389 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
2390 if (phba->fcf.current_rec.priority ==
2391 new_fcf_pri->fcf_rec.priority)
2392 phba->fcf.eligible_fcf_cnt--;
2393 list_del_init(&new_fcf_pri->list);
2394 new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
2395 }
2396 spin_unlock_irq(&phba->hbalock);
2397}
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409void
2410lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
2411{
2412 struct lpfc_fcf_pri *new_fcf_pri;
2413 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2414 spin_lock_irq(&phba->hbalock);
2415 new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
2416 spin_unlock_irq(&phba->hbalock);
2417}
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
2436 uint16_t fcf_index,
2437 struct fcf_record *new_fcf_record)
2438{
2439 uint16_t current_fcf_pri;
2440 uint16_t last_index;
2441 struct lpfc_fcf_pri *fcf_pri;
2442 struct lpfc_fcf_pri *next_fcf_pri;
2443 struct lpfc_fcf_pri *new_fcf_pri;
2444 int ret;
2445
2446 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2447 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2448 "3059 adding idx x%x pri x%x flg x%x\n",
2449 fcf_index, new_fcf_record->fip_priority,
2450 new_fcf_pri->fcf_rec.flag);
2451 spin_lock_irq(&phba->hbalock);
2452 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2453 list_del_init(&new_fcf_pri->list);
2454 new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2455 new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2456 if (list_empty(&phba->fcf.fcf_pri_list)) {
2457 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2458 ret = lpfc_sli4_fcf_rr_index_set(phba,
2459 new_fcf_pri->fcf_rec.fcf_index);
2460 goto out;
2461 }
2462
2463 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2464 LPFC_SLI4_FCF_TBL_INDX_MAX);
2465 if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2466 ret = 0;
2467 goto out;
2468 }
2469 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2470 if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
2471 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2472 if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
2473 memset(phba->fcf.fcf_rr_bmask, 0,
2474 sizeof(*phba->fcf.fcf_rr_bmask));
2475
2476 phba->fcf.eligible_fcf_cnt = 1;
2477 } else
2478
2479 phba->fcf.eligible_fcf_cnt++;
2480 ret = lpfc_sli4_fcf_rr_index_set(phba,
2481 new_fcf_pri->fcf_rec.fcf_index);
2482 goto out;
2483 }
2484
2485 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2486 &phba->fcf.fcf_pri_list, list) {
2487 if (new_fcf_pri->fcf_rec.priority <=
2488 fcf_pri->fcf_rec.priority) {
2489 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2490 list_add(&new_fcf_pri->list,
2491 &phba->fcf.fcf_pri_list);
2492 else
2493 list_add(&new_fcf_pri->list,
2494 &((struct lpfc_fcf_pri *)
2495 fcf_pri->list.prev)->list);
2496 ret = 0;
2497 goto out;
2498 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2499 || new_fcf_pri->fcf_rec.priority <
2500 next_fcf_pri->fcf_rec.priority) {
2501 list_add(&new_fcf_pri->list, &fcf_pri->list);
2502 ret = 0;
2503 goto out;
2504 }
2505 if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2506 continue;
2507
2508 }
2509 ret = 1;
2510out:
2511
2512 new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2513 spin_unlock_irq(&phba->hbalock);
2514 return ret;
2515}
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532void
2533lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2534{
2535 struct fcf_record *new_fcf_record;
2536 uint32_t boot_flag, addr_mode;
2537 uint16_t fcf_index, next_fcf_index;
2538 struct lpfc_fcf_rec *fcf_rec = NULL;
2539 uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2540 bool select_new_fcf;
2541 int rc;
2542
2543
2544 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2545 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2546 return;
2547 }
2548
2549
2550 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2551 &next_fcf_index);
2552 if (!new_fcf_record) {
2553 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2554 "2765 Mailbox command READ_FCF_RECORD "
2555 "failed to retrieve a FCF record.\n");
2556
2557 spin_lock_irq(&phba->hbalock);
2558 phba->hba_flag &= ~FCF_TS_INPROG;
2559 spin_unlock_irq(&phba->hbalock);
2560 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2561 return;
2562 }
2563
2564
2565 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2566 &addr_mode, &vlan_id);
2567
2568
2569 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2570 next_fcf_index);
2571
2572
2573
2574
2575
2576
2577 if (!rc) {
2578 lpfc_sli4_fcf_pri_list_del(phba,
2579 bf_get(lpfc_fcf_record_fcf_index,
2580 new_fcf_record));
2581 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2582 "2781 FCF (x%x) failed connection "
2583 "list check: (x%x/x%x/%x)\n",
2584 bf_get(lpfc_fcf_record_fcf_index,
2585 new_fcf_record),
2586 bf_get(lpfc_fcf_record_fcf_avail,
2587 new_fcf_record),
2588 bf_get(lpfc_fcf_record_fcf_valid,
2589 new_fcf_record),
2590 bf_get(lpfc_fcf_record_fcf_sol,
2591 new_fcf_record));
2592 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2593 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2594 new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2595 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2596 phba->fcf.current_rec.fcf_indx) {
2597 lpfc_printf_log(phba, KERN_ERR,
2598 LOG_TRACE_EVENT,
2599 "2862 FCF (x%x) matches property "
2600 "of in-use FCF (x%x)\n",
2601 bf_get(lpfc_fcf_record_fcf_index,
2602 new_fcf_record),
2603 phba->fcf.current_rec.fcf_indx);
2604 goto read_next_fcf;
2605 }
2606
2607
2608
2609
2610
2611
2612 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2613 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2614 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2615 "2835 Invalid in-use FCF "
2616 "(x%x), enter FCF failover "
2617 "table scan.\n",
2618 phba->fcf.current_rec.fcf_indx);
2619 spin_lock_irq(&phba->hbalock);
2620 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2621 spin_unlock_irq(&phba->hbalock);
2622 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2623 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2624 LPFC_FCOE_FCF_GET_FIRST);
2625 return;
2626 }
2627 }
2628 goto read_next_fcf;
2629 } else {
2630 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2631 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2632 new_fcf_record);
2633 if (rc)
2634 goto read_next_fcf;
2635 }
2636
2637
2638
2639
2640
2641
2642
2643 spin_lock_irq(&phba->hbalock);
2644 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2645 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2646 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2647 new_fcf_record, vlan_id)) {
2648 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2649 phba->fcf.current_rec.fcf_indx) {
2650 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2651 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2652
2653 __lpfc_sli4_stop_fcf_redisc_wait_timer(
2654 phba);
2655 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2656
2657 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2658 spin_unlock_irq(&phba->hbalock);
2659 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2660 "2836 New FCF matches in-use "
2661 "FCF (x%x), port_state:x%x, "
2662 "fc_flag:x%x\n",
2663 phba->fcf.current_rec.fcf_indx,
2664 phba->pport->port_state,
2665 phba->pport->fc_flag);
2666 goto out;
2667 } else
2668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2669 "2863 New FCF (x%x) matches "
2670 "property of in-use FCF (x%x)\n",
2671 bf_get(lpfc_fcf_record_fcf_index,
2672 new_fcf_record),
2673 phba->fcf.current_rec.fcf_indx);
2674 }
2675
2676
2677
2678
2679
2680
2681
2682 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2683 spin_unlock_irq(&phba->hbalock);
2684 goto read_next_fcf;
2685 }
2686 }
2687
2688
2689
2690
2691 if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2692 fcf_rec = &phba->fcf.failover_rec;
2693 else
2694 fcf_rec = &phba->fcf.current_rec;
2695
2696 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
2697
2698
2699
2700
2701
2702 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2703
2704 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2705 "2837 Update current FCF record "
2706 "(x%x) with new FCF record (x%x)\n",
2707 fcf_rec->fcf_indx,
2708 bf_get(lpfc_fcf_record_fcf_index,
2709 new_fcf_record));
2710 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2711 addr_mode, vlan_id, BOOT_ENABLE);
2712 spin_unlock_irq(&phba->hbalock);
2713 goto read_next_fcf;
2714 }
2715
2716
2717
2718
2719
2720 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2721 spin_unlock_irq(&phba->hbalock);
2722 goto read_next_fcf;
2723 }
2724
2725
2726
2727
2728 if (new_fcf_record->fip_priority < fcf_rec->priority) {
2729
2730 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2731 "2838 Update current FCF record "
2732 "(x%x) with new FCF record (x%x)\n",
2733 fcf_rec->fcf_indx,
2734 bf_get(lpfc_fcf_record_fcf_index,
2735 new_fcf_record));
2736 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2737 addr_mode, vlan_id, 0);
2738
2739 phba->fcf.eligible_fcf_cnt = 1;
2740 } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2741
2742 phba->fcf.eligible_fcf_cnt++;
2743 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2744 phba->fcf.eligible_fcf_cnt);
2745 if (select_new_fcf) {
2746 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2747 "2839 Update current FCF record "
2748 "(x%x) with new FCF record (x%x)\n",
2749 fcf_rec->fcf_indx,
2750 bf_get(lpfc_fcf_record_fcf_index,
2751 new_fcf_record));
2752
2753 __lpfc_update_fcf_record(phba, fcf_rec,
2754 new_fcf_record,
2755 addr_mode, vlan_id, 0);
2756 }
2757 }
2758 spin_unlock_irq(&phba->hbalock);
2759 goto read_next_fcf;
2760 }
2761
2762
2763
2764
2765 if (fcf_rec) {
2766 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2767 "2840 Update initial FCF candidate "
2768 "with FCF (x%x)\n",
2769 bf_get(lpfc_fcf_record_fcf_index,
2770 new_fcf_record));
2771 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2772 addr_mode, vlan_id, (boot_flag ?
2773 BOOT_ENABLE : 0));
2774 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2775
2776 phba->fcf.eligible_fcf_cnt = 1;
2777 }
2778 spin_unlock_irq(&phba->hbalock);
2779 goto read_next_fcf;
2780
2781read_next_fcf:
2782 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2783 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2784 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2785
2786
2787
2788
2789
2790
2791
2792
2793 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2794 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2795 "2782 No suitable FCF found: "
2796 "(x%x/x%x)\n",
2797 phba->fcoe_eventtag_at_fcf_scan,
2798 bf_get(lpfc_fcf_record_fcf_index,
2799 new_fcf_record));
2800 spin_lock_irq(&phba->hbalock);
2801 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2802 phba->hba_flag &= ~FCF_TS_INPROG;
2803 spin_unlock_irq(&phba->hbalock);
2804
2805 lpfc_printf_log(phba, KERN_INFO,
2806 LOG_FIP,
2807 "2864 On devloss tmo "
2808 "unreg in-use FCF and "
2809 "rescan FCF table\n");
2810 lpfc_unregister_fcf_rescan(phba);
2811 return;
2812 }
2813
2814
2815
2816 phba->hba_flag &= ~FCF_TS_INPROG;
2817 spin_unlock_irq(&phba->hbalock);
2818 return;
2819 }
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830 lpfc_unregister_fcf(phba);
2831
2832
2833 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2834 "2842 Replace in-use FCF (x%x) "
2835 "with failover FCF (x%x)\n",
2836 phba->fcf.current_rec.fcf_indx,
2837 phba->fcf.failover_rec.fcf_indx);
2838 memcpy(&phba->fcf.current_rec,
2839 &phba->fcf.failover_rec,
2840 sizeof(struct lpfc_fcf_rec));
2841
2842
2843
2844
2845
2846 spin_lock_irq(&phba->hbalock);
2847 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2848 spin_unlock_irq(&phba->hbalock);
2849
2850 lpfc_register_fcf(phba);
2851 } else {
2852
2853
2854
2855
2856 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2857 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2858 return;
2859
2860 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2861 phba->fcf.fcf_flag & FCF_IN_USE) {
2862
2863
2864
2865
2866
2867
2868 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2869 "2841 In-use FCF record (x%x) "
2870 "not reported, entering fast "
2871 "FCF failover mode scanning.\n",
2872 phba->fcf.current_rec.fcf_indx);
2873 spin_lock_irq(&phba->hbalock);
2874 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2875 spin_unlock_irq(&phba->hbalock);
2876 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2877 LPFC_FCOE_FCF_GET_FIRST);
2878 return;
2879 }
2880
2881 lpfc_register_fcf(phba);
2882 }
2883 } else
2884 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2885 return;
2886
2887out:
2888 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2889 lpfc_register_fcf(phba);
2890
2891 return;
2892}
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909void
2910lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2911{
2912 struct fcf_record *new_fcf_record;
2913 uint32_t boot_flag, addr_mode;
2914 uint16_t next_fcf_index, fcf_index;
2915 uint16_t current_fcf_index;
2916 uint16_t vlan_id;
2917 int rc;
2918
2919
2920 if (phba->link_state < LPFC_LINK_UP) {
2921 spin_lock_irq(&phba->hbalock);
2922 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2923 phba->hba_flag &= ~FCF_RR_INPROG;
2924 spin_unlock_irq(&phba->hbalock);
2925 goto out;
2926 }
2927
2928
2929 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2930 &next_fcf_index);
2931 if (!new_fcf_record) {
2932 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2933 "2766 Mailbox command READ_FCF_RECORD "
2934 "failed to retrieve a FCF record. "
2935 "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2936 phba->fcf.fcf_flag);
2937 lpfc_unregister_fcf_rescan(phba);
2938 goto out;
2939 }
2940
2941
2942 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2943 &addr_mode, &vlan_id);
2944
2945
2946 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2947 next_fcf_index);
2948
2949 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2950 if (!rc) {
2951 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2952 "2848 Remove ineligible FCF (x%x) from "
2953 "from roundrobin bmask\n", fcf_index);
2954
2955 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2956
2957 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2958 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2959 if (rc)
2960 goto out;
2961 goto error_out;
2962 }
2963
2964 if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2965 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2966 "2760 Perform FLOGI roundrobin FCF failover: "
2967 "FCF (x%x) back to FCF (x%x)\n",
2968 phba->fcf.current_rec.fcf_indx, fcf_index);
2969
2970 msleep(500);
2971 lpfc_issue_init_vfi(phba->pport);
2972 goto out;
2973 }
2974
2975
2976 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2977 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2978 phba->fcf.failover_rec.fcf_indx, fcf_index);
2979 spin_lock_irq(&phba->hbalock);
2980 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2981 new_fcf_record, addr_mode, vlan_id,
2982 (boot_flag ? BOOT_ENABLE : 0));
2983 spin_unlock_irq(&phba->hbalock);
2984
2985 current_fcf_index = phba->fcf.current_rec.fcf_indx;
2986
2987
2988 lpfc_unregister_fcf(phba);
2989
2990
2991 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2992 sizeof(struct lpfc_fcf_rec));
2993
2994 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2995 "2783 Perform FLOGI roundrobin FCF failover: FCF "
2996 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2997
2998error_out:
2999 lpfc_register_fcf(phba);
3000out:
3001 lpfc_sli4_mbox_cmd_free(phba, mboxq);
3002}
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015void
3016lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3017{
3018 struct fcf_record *new_fcf_record;
3019 uint32_t boot_flag, addr_mode;
3020 uint16_t fcf_index, next_fcf_index;
3021 uint16_t vlan_id;
3022 int rc;
3023
3024
3025 if (phba->link_state < LPFC_LINK_UP)
3026 goto out;
3027
3028
3029 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
3030 goto out;
3031
3032
3033 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
3034 &next_fcf_index);
3035 if (!new_fcf_record) {
3036 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
3037 "2767 Mailbox command READ_FCF_RECORD "
3038 "failed to retrieve a FCF record.\n");
3039 goto out;
3040 }
3041
3042
3043 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
3044 &addr_mode, &vlan_id);
3045
3046
3047 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
3048 next_fcf_index);
3049
3050 if (!rc)
3051 goto out;
3052
3053
3054 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
3055
3056 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
3057
3058out:
3059 lpfc_sli4_mbox_cmd_free(phba, mboxq);
3060}
3061
3062
3063
3064
3065
3066
3067
3068
3069static void
3070lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3071{
3072 struct lpfc_vport *vport = mboxq->vport;
3073
3074
3075
3076
3077
3078 if (mboxq->u.mb.mbxStatus &&
3079 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3080 LPFC_SLI_INTF_IF_TYPE_0) &&
3081 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
3082 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3083 "2891 Init VFI mailbox failed 0x%x\n",
3084 mboxq->u.mb.mbxStatus);
3085 mempool_free(mboxq, phba->mbox_mem_pool);
3086 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3087 return;
3088 }
3089
3090 lpfc_initial_flogi(vport);
3091 mempool_free(mboxq, phba->mbox_mem_pool);
3092 return;
3093}
3094
3095
3096
3097
3098
3099
3100
3101
3102void
3103lpfc_issue_init_vfi(struct lpfc_vport *vport)
3104{
3105 LPFC_MBOXQ_t *mboxq;
3106 int rc;
3107 struct lpfc_hba *phba = vport->phba;
3108
3109 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3110 if (!mboxq) {
3111 lpfc_printf_vlog(vport, KERN_ERR,
3112 LOG_TRACE_EVENT, "2892 Failed to allocate "
3113 "init_vfi mailbox\n");
3114 return;
3115 }
3116 lpfc_init_vfi(mboxq, vport);
3117 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
3118 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
3119 if (rc == MBX_NOT_FINISHED) {
3120 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3121 "2893 Failed to issue init_vfi mailbox\n");
3122 mempool_free(mboxq, vport->phba->mbox_mem_pool);
3123 }
3124}
3125
3126
3127
3128
3129
3130
3131
3132
3133void
3134lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3135{
3136 struct lpfc_vport *vport = mboxq->vport;
3137 struct lpfc_nodelist *ndlp;
3138 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3139
3140 if (mboxq->u.mb.mbxStatus) {
3141 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3142 "2609 Init VPI mailbox failed 0x%x\n",
3143 mboxq->u.mb.mbxStatus);
3144 mempool_free(mboxq, phba->mbox_mem_pool);
3145 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3146 return;
3147 }
3148 spin_lock_irq(shost->host_lock);
3149 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
3150 spin_unlock_irq(shost->host_lock);
3151
3152
3153 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
3154 ndlp = lpfc_findnode_did(vport, Fabric_DID);
3155 if (!ndlp)
3156 lpfc_printf_vlog(vport, KERN_ERR,
3157 LOG_TRACE_EVENT,
3158 "2731 Cannot find fabric "
3159 "controller node\n");
3160 else
3161 lpfc_register_new_vport(phba, vport, ndlp);
3162 mempool_free(mboxq, phba->mbox_mem_pool);
3163 return;
3164 }
3165
3166 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
3167 lpfc_initial_fdisc(vport);
3168 else {
3169 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
3170 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3171 "2606 No NPIV Fabric support\n");
3172 }
3173 mempool_free(mboxq, phba->mbox_mem_pool);
3174 return;
3175}
3176
3177
3178
3179
3180
3181
3182
3183
3184void
3185lpfc_issue_init_vpi(struct lpfc_vport *vport)
3186{
3187 LPFC_MBOXQ_t *mboxq;
3188 int rc, vpi;
3189
3190 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
3191 vpi = lpfc_alloc_vpi(vport->phba);
3192 if (!vpi) {
3193 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3194 "3303 Failed to obtain vport vpi\n");
3195 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3196 return;
3197 }
3198 vport->vpi = vpi;
3199 }
3200
3201 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
3202 if (!mboxq) {
3203 lpfc_printf_vlog(vport, KERN_ERR,
3204 LOG_TRACE_EVENT, "2607 Failed to allocate "
3205 "init_vpi mailbox\n");
3206 return;
3207 }
3208 lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
3209 mboxq->vport = vport;
3210 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
3211 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
3212 if (rc == MBX_NOT_FINISHED) {
3213 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3214 "2608 Failed to issue init_vpi mailbox\n");
3215 mempool_free(mboxq, vport->phba->mbox_mem_pool);
3216 }
3217}
3218
3219
3220
3221
3222
3223
3224
3225
3226void
3227lpfc_start_fdiscs(struct lpfc_hba *phba)
3228{
3229 struct lpfc_vport **vports;
3230 int i;
3231
3232 vports = lpfc_create_vport_work_array(phba);
3233 if (vports != NULL) {
3234 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3235 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
3236 continue;
3237
3238 if (vports[i]->vpi > phba->max_vpi) {
3239 lpfc_vport_set_state(vports[i],
3240 FC_VPORT_FAILED);
3241 continue;
3242 }
3243 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3244 lpfc_vport_set_state(vports[i],
3245 FC_VPORT_LINKDOWN);
3246 continue;
3247 }
3248 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
3249 lpfc_issue_init_vpi(vports[i]);
3250 continue;
3251 }
3252 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
3253 lpfc_initial_fdisc(vports[i]);
3254 else {
3255 lpfc_vport_set_state(vports[i],
3256 FC_VPORT_NO_FABRIC_SUPP);
3257 lpfc_printf_vlog(vports[i], KERN_ERR,
3258 LOG_TRACE_EVENT,
3259 "0259 No NPIV "
3260 "Fabric support\n");
3261 }
3262 }
3263 }
3264 lpfc_destroy_vport_work_array(phba, vports);
3265}
3266
3267void
3268lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3269{
3270 struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
3271 struct lpfc_vport *vport = mboxq->vport;
3272 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3273
3274
3275
3276
3277
3278 if (mboxq->u.mb.mbxStatus &&
3279 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3280 LPFC_SLI_INTF_IF_TYPE_0) &&
3281 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
3282 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3283 "2018 REG_VFI mbxStatus error x%x "
3284 "HBA state x%x\n",
3285 mboxq->u.mb.mbxStatus, vport->port_state);
3286 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3287
3288 lpfc_disc_list_loopmap(vport);
3289
3290 lpfc_disc_start(vport);
3291 goto out_free_mem;
3292 }
3293 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3294 goto out_free_mem;
3295 }
3296
3297
3298
3299
3300
3301 if (vport->fc_flag & FC_VFI_REGISTERED)
3302 if (!(phba->sli_rev == LPFC_SLI_REV4 &&
3303 vport->fc_flag & FC_PT2PT))
3304 goto out_free_mem;
3305
3306
3307 spin_lock_irq(shost->host_lock);
3308 vport->vpi_state |= LPFC_VPI_REGISTERED;
3309 vport->fc_flag |= FC_VFI_REGISTERED;
3310 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3311 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
3312 spin_unlock_irq(shost->host_lock);
3313
3314
3315 if ((phba->sli_rev == LPFC_SLI_REV4) &&
3316 (phba->link_flag & LS_LOOPBACK_MODE)) {
3317 phba->link_state = LPFC_HBA_READY;
3318 goto out_free_mem;
3319 }
3320
3321 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3322 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
3323 "alpacnt:%d LinkState:%x topology:%x\n",
3324 vport->port_state, vport->fc_flag, vport->fc_myDID,
3325 vport->phba->alpa_map[0],
3326 phba->link_state, phba->fc_topology);
3327
3328 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3329
3330
3331
3332
3333 if ((vport->fc_flag & FC_PT2PT) ||
3334 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
3335 !(vport->fc_flag & FC_PUBLIC_LOOP))) {
3336
3337
3338 lpfc_disc_list_loopmap(vport);
3339
3340 if (vport->fc_flag & FC_PT2PT)
3341 vport->port_state = LPFC_VPORT_READY;
3342 else
3343 lpfc_disc_start(vport);
3344 } else {
3345 lpfc_start_fdiscs(phba);
3346 lpfc_do_scr_ns_plogi(phba, vport);
3347 }
3348 }
3349
3350out_free_mem:
3351 mempool_free(mboxq, phba->mbox_mem_pool);
3352 if (dmabuf) {
3353 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
3354 kfree(dmabuf);
3355 }
3356 return;
3357}
3358
3359static void
3360lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3361{
3362 MAILBOX_t *mb = &pmb->u.mb;
3363 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3364 struct lpfc_vport *vport = pmb->vport;
3365 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3366 struct serv_parm *sp = &vport->fc_sparam;
3367 uint32_t ed_tov;
3368
3369
3370 if (mb->mbxStatus) {
3371
3372 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3373 "0319 READ_SPARAM mbxStatus error x%x "
3374 "hba state x%x>\n",
3375 mb->mbxStatus, vport->port_state);
3376 lpfc_linkdown(phba);
3377 goto out;
3378 }
3379
3380 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
3381 sizeof (struct serv_parm));
3382
3383 ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
3384 if (sp->cmn.edtovResolution)
3385 ed_tov = (ed_tov + 999999) / 1000000;
3386
3387 phba->fc_edtov = ed_tov;
3388 phba->fc_ratov = (2 * ed_tov) / 1000;
3389 if (phba->fc_ratov < FF_DEF_RATOV) {
3390
3391 phba->fc_ratov = FF_DEF_RATOV;
3392 }
3393
3394 lpfc_update_vport_wwn(vport);
3395 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3396 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3397 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
3398 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
3399 }
3400
3401 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3402 kfree(mp);
3403 mempool_free(pmb, phba->mbox_mem_pool);
3404
3405
3406
3407
3408 if (phba->hba_flag & HBA_DEFER_FLOGI) {
3409 lpfc_initial_flogi(vport);
3410 phba->hba_flag &= ~HBA_DEFER_FLOGI;
3411 }
3412 return;
3413
3414out:
3415 pmb->ctx_buf = NULL;
3416 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3417 kfree(mp);
3418 lpfc_issue_clear_la(phba, vport);
3419 mempool_free(pmb, phba->mbox_mem_pool);
3420 return;
3421}
3422
3423static void
3424lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3425{
3426 struct lpfc_vport *vport = phba->pport;
3427 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
3428 struct Scsi_Host *shost;
3429 int i;
3430 struct lpfc_dmabuf *mp;
3431 int rc;
3432 struct fcf_record *fcf_record;
3433 uint32_t fc_flags = 0;
3434 unsigned long iflags;
3435
3436 spin_lock_irqsave(&phba->hbalock, iflags);
3437 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
3438
3439 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3440 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
3441 case LPFC_LINK_SPEED_1GHZ:
3442 case LPFC_LINK_SPEED_2GHZ:
3443 case LPFC_LINK_SPEED_4GHZ:
3444 case LPFC_LINK_SPEED_8GHZ:
3445 case LPFC_LINK_SPEED_10GHZ:
3446 case LPFC_LINK_SPEED_16GHZ:
3447 case LPFC_LINK_SPEED_32GHZ:
3448 case LPFC_LINK_SPEED_64GHZ:
3449 case LPFC_LINK_SPEED_128GHZ:
3450 case LPFC_LINK_SPEED_256GHZ:
3451 break;
3452 default:
3453 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
3454 break;
3455 }
3456 }
3457
3458 if (phba->fc_topology &&
3459 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3460 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3461 "3314 Toplogy changed was 0x%x is 0x%x\n",
3462 phba->fc_topology,
3463 bf_get(lpfc_mbx_read_top_topology, la));
3464 phba->fc_topology_changed = 1;
3465 }
3466
3467 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3468 phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);
3469
3470 shost = lpfc_shost_from_vport(vport);
3471 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3472 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
3473
3474
3475
3476
3477 if (phba->cfg_enable_npiv && phba->max_vpi)
3478 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3479 "1309 Link Up Event npiv not supported in loop "
3480 "topology\n");
3481
3482 if (bf_get(lpfc_mbx_read_top_il, la))
3483 fc_flags |= FC_LBIT;
3484
3485 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3486 i = la->lilpBde64.tus.f.bdeSize;
3487
3488 if (i == 0) {
3489 phba->alpa_map[0] = 0;
3490 } else {
3491 if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3492 int numalpa, j, k;
3493 union {
3494 uint8_t pamap[16];
3495 struct {
3496 uint32_t wd1;
3497 uint32_t wd2;
3498 uint32_t wd3;
3499 uint32_t wd4;
3500 } pa;
3501 } un;
3502 numalpa = phba->alpa_map[0];
3503 j = 0;
3504 while (j < numalpa) {
3505 memset(un.pamap, 0, 16);
3506 for (k = 1; j < numalpa; k++) {
3507 un.pamap[k - 1] =
3508 phba->alpa_map[j + 1];
3509 j++;
3510 if (k == 16)
3511 break;
3512 }
3513
3514 lpfc_printf_log(phba,
3515 KERN_WARNING,
3516 LOG_LINK_EVENT,
3517 "1304 Link Up Event "
3518 "ALPA map Data: x%x "
3519 "x%x x%x x%x\n",
3520 un.pa.wd1, un.pa.wd2,
3521 un.pa.wd3, un.pa.wd4);
3522 }
3523 }
3524 }
3525 } else {
3526 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3527 if (phba->max_vpi && phba->cfg_enable_npiv &&
3528 (phba->sli_rev >= LPFC_SLI_REV3))
3529 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3530 }
3531 vport->fc_myDID = phba->fc_pref_DID;
3532 fc_flags |= FC_LBIT;
3533 }
3534 spin_unlock_irqrestore(&phba->hbalock, iflags);
3535
3536 if (fc_flags) {
3537 spin_lock_irqsave(shost->host_lock, iflags);
3538 vport->fc_flag |= fc_flags;
3539 spin_unlock_irqrestore(shost->host_lock, iflags);
3540 }
3541
3542 lpfc_linkup(phba);
3543 sparam_mbox = NULL;
3544
3545 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3546 if (!sparam_mbox)
3547 goto out;
3548
3549 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
3550 if (rc) {
3551 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3552 goto out;
3553 }
3554 sparam_mbox->vport = vport;
3555 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
3556 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
3557 if (rc == MBX_NOT_FINISHED) {
3558 mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
3559 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3560 kfree(mp);
3561 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3562 goto out;
3563 }
3564
3565 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3566 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3567 if (!cfglink_mbox)
3568 goto out;
3569 vport->port_state = LPFC_LOCAL_CFG_LINK;
3570 lpfc_config_link(phba, cfglink_mbox);
3571 cfglink_mbox->vport = vport;
3572 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
3573 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
3574 if (rc == MBX_NOT_FINISHED) {
3575 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
3576 goto out;
3577 }
3578 } else {
3579 vport->port_state = LPFC_VPORT_UNKNOWN;
3580
3581
3582
3583
3584
3585 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
3586 fcf_record = kzalloc(sizeof(struct fcf_record),
3587 GFP_KERNEL);
3588 if (unlikely(!fcf_record)) {
3589 lpfc_printf_log(phba, KERN_ERR,
3590 LOG_TRACE_EVENT,
3591 "2554 Could not allocate memory for "
3592 "fcf record\n");
3593 rc = -ENODEV;
3594 goto out;
3595 }
3596
3597 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
3598 LPFC_FCOE_FCF_DEF_INDEX);
3599 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
3600 if (unlikely(rc)) {
3601 lpfc_printf_log(phba, KERN_ERR,
3602 LOG_TRACE_EVENT,
3603 "2013 Could not manually add FCF "
3604 "record 0, status %d\n", rc);
3605 rc = -ENODEV;
3606 kfree(fcf_record);
3607 goto out;
3608 }
3609 kfree(fcf_record);
3610 }
3611
3612
3613
3614
3615 spin_lock_irqsave(&phba->hbalock, iflags);
3616 if (phba->hba_flag & FCF_TS_INPROG) {
3617 spin_unlock_irqrestore(&phba->hbalock, iflags);
3618 return;
3619 }
3620
3621 phba->fcf.fcf_flag |= FCF_INIT_DISC;
3622 spin_unlock_irqrestore(&phba->hbalock, iflags);
3623 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3624 "2778 Start FCF table scan at linkup\n");
3625 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3626 LPFC_FCOE_FCF_GET_FIRST);
3627 if (rc) {
3628 spin_lock_irqsave(&phba->hbalock, iflags);
3629 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3630 spin_unlock_irqrestore(&phba->hbalock, iflags);
3631 goto out;
3632 }
3633
3634 lpfc_sli4_clear_fcf_rr_bmask(phba);
3635 }
3636
3637
3638 memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
3639 scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
3640 init_utsname()->nodename);
3641 return;
3642out:
3643 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3644 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3645 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
3646 vport->port_state, sparam_mbox, cfglink_mbox);
3647 lpfc_issue_clear_la(phba, vport);
3648 return;
3649}
3650
3651static void
3652lpfc_enable_la(struct lpfc_hba *phba)
3653{
3654 uint32_t control;
3655 struct lpfc_sli *psli = &phba->sli;
3656 spin_lock_irq(&phba->hbalock);
3657 psli->sli_flag |= LPFC_PROCESS_LA;
3658 if (phba->sli_rev <= LPFC_SLI_REV3) {
3659 control = readl(phba->HCregaddr);
3660 control |= HC_LAINT_ENA;
3661 writel(control, phba->HCregaddr);
3662 readl(phba->HCregaddr);
3663 }
3664 spin_unlock_irq(&phba->hbalock);
3665}
3666
3667static void
3668lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
3669{
3670 lpfc_linkdown(phba);
3671 lpfc_enable_la(phba);
3672 lpfc_unregister_unused_fcf(phba);
3673
3674}
3675
3676
3677
3678
3679
3680
3681
3682
3683void
3684lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3685{
3686 struct lpfc_vport *vport = pmb->vport;
3687 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3688 struct lpfc_mbx_read_top *la;
3689 struct lpfc_sli_ring *pring;
3690 MAILBOX_t *mb = &pmb->u.mb;
3691 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3692 uint8_t attn_type;
3693 unsigned long iflags;
3694
3695
3696 pring = lpfc_phba_elsring(phba);
3697 if (pring)
3698 pring->flag &= ~LPFC_STOP_IOCB_EVENT;
3699
3700
3701 if (mb->mbxStatus) {
3702 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3703 "1307 READ_LA mbox error x%x state x%x\n",
3704 mb->mbxStatus, vport->port_state);
3705 lpfc_mbx_issue_link_down(phba);
3706 phba->link_state = LPFC_HBA_ERROR;
3707 goto lpfc_mbx_cmpl_read_topology_free_mbuf;
3708 }
3709
3710 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3711 attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
3712
3713 memcpy(&phba->alpa_map[0], mp->virt, 128);
3714
3715 spin_lock_irqsave(shost->host_lock, iflags);
3716 if (bf_get(lpfc_mbx_read_top_pb, la))
3717 vport->fc_flag |= FC_BYPASSED_MODE;
3718 else
3719 vport->fc_flag &= ~FC_BYPASSED_MODE;
3720 spin_unlock_irqrestore(shost->host_lock, iflags);
3721
3722 if (phba->fc_eventTag <= la->eventTag) {
3723 phba->fc_stat.LinkMultiEvent++;
3724 if (attn_type == LPFC_ATT_LINK_UP)
3725 if (phba->fc_eventTag != 0)
3726 lpfc_linkdown(phba);
3727 }
3728
3729 phba->fc_eventTag = la->eventTag;
3730 if (phba->sli_rev < LPFC_SLI_REV4) {
3731 spin_lock_irqsave(&phba->hbalock, iflags);
3732 if (bf_get(lpfc_mbx_read_top_mm, la))
3733 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3734 else
3735 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3736 spin_unlock_irqrestore(&phba->hbalock, iflags);
3737 }
3738
3739 phba->link_events++;
3740 if ((attn_type == LPFC_ATT_LINK_UP) &&
3741 !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
3742 phba->fc_stat.LinkUp++;
3743 if (phba->link_flag & LS_LOOPBACK_MODE) {
3744 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3745 "1306 Link Up Event in loop back mode "
3746 "x%x received Data: x%x x%x x%x x%x\n",
3747 la->eventTag, phba->fc_eventTag,
3748 bf_get(lpfc_mbx_read_top_alpa_granted,
3749 la),
3750 bf_get(lpfc_mbx_read_top_link_spd, la),
3751 phba->alpa_map[0]);
3752 } else {
3753 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3754 "1303 Link Up Event x%x received "
3755 "Data: x%x x%x x%x x%x x%x x%x %d\n",
3756 la->eventTag, phba->fc_eventTag,
3757 bf_get(lpfc_mbx_read_top_alpa_granted,
3758 la),
3759 bf_get(lpfc_mbx_read_top_link_spd, la),
3760 phba->alpa_map[0],
3761 bf_get(lpfc_mbx_read_top_mm, la),
3762 bf_get(lpfc_mbx_read_top_fa, la),
3763 phba->wait_4_mlo_maint_flg);
3764 }
3765 lpfc_mbx_process_link_up(phba, la);
3766
3767 if (phba->cmf_active_mode != LPFC_CFG_OFF)
3768 lpfc_cmf_signal_init(phba);
3769
3770 } else if (attn_type == LPFC_ATT_LINK_DOWN ||
3771 attn_type == LPFC_ATT_UNEXP_WWPN) {
3772 phba->fc_stat.LinkDown++;
3773 if (phba->link_flag & LS_LOOPBACK_MODE)
3774 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3775 "1308 Link Down Event in loop back mode "
3776 "x%x received "
3777 "Data: x%x x%x x%x\n",
3778 la->eventTag, phba->fc_eventTag,
3779 phba->pport->port_state, vport->fc_flag);
3780 else if (attn_type == LPFC_ATT_UNEXP_WWPN)
3781 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3782 "1313 Link Down Unexpected FA WWPN Event x%x "
3783 "received Data: x%x x%x x%x x%x x%x\n",
3784 la->eventTag, phba->fc_eventTag,
3785 phba->pport->port_state, vport->fc_flag,
3786 bf_get(lpfc_mbx_read_top_mm, la),
3787 bf_get(lpfc_mbx_read_top_fa, la));
3788 else
3789 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3790 "1305 Link Down Event x%x received "
3791 "Data: x%x x%x x%x x%x x%x\n",
3792 la->eventTag, phba->fc_eventTag,
3793 phba->pport->port_state, vport->fc_flag,
3794 bf_get(lpfc_mbx_read_top_mm, la),
3795 bf_get(lpfc_mbx_read_top_fa, la));
3796 lpfc_mbx_issue_link_down(phba);
3797 }
3798 if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
3799 attn_type == LPFC_ATT_LINK_UP) {
3800 if (phba->link_state != LPFC_LINK_DOWN) {
3801 phba->fc_stat.LinkDown++;
3802 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3803 "1312 Link Down Event x%x received "
3804 "Data: x%x x%x x%x\n",
3805 la->eventTag, phba->fc_eventTag,
3806 phba->pport->port_state, vport->fc_flag);
3807 lpfc_mbx_issue_link_down(phba);
3808 } else
3809 lpfc_enable_la(phba);
3810
3811 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3812 "1310 Menlo Maint Mode Link up Event x%x rcvd "
3813 "Data: x%x x%x x%x\n",
3814 la->eventTag, phba->fc_eventTag,
3815 phba->pport->port_state, vport->fc_flag);
3816
3817
3818
3819
3820
3821 if (phba->wait_4_mlo_maint_flg) {
3822 phba->wait_4_mlo_maint_flg = 0;
3823 wake_up_interruptible(&phba->wait_4_mlo_m_q);
3824 }
3825 }
3826
3827 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3828 bf_get(lpfc_mbx_read_top_fa, la)) {
3829 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
3830 lpfc_issue_clear_la(phba, vport);
3831 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3832 "1311 fa %d\n",
3833 bf_get(lpfc_mbx_read_top_fa, la));
3834 }
3835
3836lpfc_mbx_cmpl_read_topology_free_mbuf:
3837 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3838 kfree(mp);
3839 mempool_free(pmb, phba->mbox_mem_pool);
3840 return;
3841}
3842
3843
3844
3845
3846
3847
3848
3849void
3850lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3851{
3852 struct lpfc_vport *vport = pmb->vport;
3853 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3854 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3855
3856 pmb->ctx_buf = NULL;
3857 pmb->ctx_ndlp = NULL;
3858
3859 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
3860 "0002 rpi:%x DID:%x flg:%x %d x%px\n",
3861 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3862 kref_read(&ndlp->kref),
3863 ndlp);
3864 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3865 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3866
3867 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3868 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3869
3870
3871
3872
3873
3874
3875
3876
3877 spin_lock_irq(&ndlp->lock);
3878 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3879 spin_unlock_irq(&ndlp->lock);
3880
3881
3882
3883
3884
3885
3886 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3887 lpfc_unreg_rpi(vport, ndlp);
3888 }
3889
3890
3891 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3892
3893 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3894 kfree(mp);
3895 mempool_free(pmb, phba->mbox_mem_pool);
3896
3897
3898
3899 lpfc_nlp_put(ndlp);
3900
3901 return;
3902}
3903
3904static void
3905lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3906{
3907 MAILBOX_t *mb = &pmb->u.mb;
3908 struct lpfc_vport *vport = pmb->vport;
3909 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3910
3911 switch (mb->mbxStatus) {
3912 case 0x0011:
3913 case 0x0020:
3914 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3915 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3916 mb->mbxStatus);
3917 break;
3918
3919 case 0x9700:
3920 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3921 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3922 vport->vpi, mb->mbxStatus);
3923 if (!(phba->pport->load_flag & FC_UNLOADING))
3924 lpfc_workq_post_event(phba, NULL, NULL,
3925 LPFC_EVT_RESET_HBA);
3926 }
3927 spin_lock_irq(shost->host_lock);
3928 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
3929 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3930 spin_unlock_irq(shost->host_lock);
3931 vport->unreg_vpi_cmpl = VPORT_OK;
3932 mempool_free(pmb, phba->mbox_mem_pool);
3933 lpfc_cleanup_vports_rrqs(vport, NULL);
3934
3935
3936
3937
3938 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
3939 scsi_host_put(shost);
3940}
3941
3942int
3943lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3944{
3945 struct lpfc_hba *phba = vport->phba;
3946 LPFC_MBOXQ_t *mbox;
3947 int rc;
3948
3949 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3950 if (!mbox)
3951 return 1;
3952
3953 lpfc_unreg_vpi(phba, vport->vpi, mbox);
3954 mbox->vport = vport;
3955 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
3956 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3957 if (rc == MBX_NOT_FINISHED) {
3958 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3959 "1800 Could not issue unreg_vpi\n");
3960 mempool_free(mbox, phba->mbox_mem_pool);
3961 vport->unreg_vpi_cmpl = VPORT_ERROR;
3962 return rc;
3963 }
3964 return 0;
3965}
3966
3967static void
3968lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3969{
3970 struct lpfc_vport *vport = pmb->vport;
3971 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3972 MAILBOX_t *mb = &pmb->u.mb;
3973
3974 switch (mb->mbxStatus) {
3975 case 0x0011:
3976 case 0x9601:
3977 case 0x9602:
3978 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3979 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3980 mb->mbxStatus);
3981 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3982 spin_lock_irq(shost->host_lock);
3983 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3984 spin_unlock_irq(shost->host_lock);
3985 vport->fc_myDID = 0;
3986
3987 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3988 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3989 if (phba->nvmet_support)
3990 lpfc_nvmet_update_targetport(phba);
3991 else
3992 lpfc_nvme_update_localport(vport);
3993 }
3994 goto out;
3995 }
3996
3997 spin_lock_irq(shost->host_lock);
3998 vport->vpi_state |= LPFC_VPI_REGISTERED;
3999 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
4000 spin_unlock_irq(shost->host_lock);
4001 vport->num_disc_nodes = 0;
4002
4003 if (vport->fc_npr_cnt)
4004 lpfc_els_disc_plogi(vport);
4005
4006 if (!vport->num_disc_nodes) {
4007 spin_lock_irq(shost->host_lock);
4008 vport->fc_flag &= ~FC_NDISC_ACTIVE;
4009 spin_unlock_irq(shost->host_lock);
4010 lpfc_can_disctmo(vport);
4011 }
4012 vport->port_state = LPFC_VPORT_READY;
4013
4014out:
4015 mempool_free(pmb, phba->mbox_mem_pool);
4016 return;
4017}
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027void
4028lpfc_create_static_vport(struct lpfc_hba *phba)
4029{
4030 LPFC_MBOXQ_t *pmb = NULL;
4031 MAILBOX_t *mb;
4032 struct static_vport_info *vport_info;
4033 int mbx_wait_rc = 0, i;
4034 struct fc_vport_identifiers vport_id;
4035 struct fc_vport *new_fc_vport;
4036 struct Scsi_Host *shost;
4037 struct lpfc_vport *vport;
4038 uint16_t offset = 0;
4039 uint8_t *vport_buff;
4040 struct lpfc_dmabuf *mp;
4041 uint32_t byte_count = 0;
4042
4043 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4044 if (!pmb) {
4045 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4046 "0542 lpfc_create_static_vport failed to"
4047 " allocate mailbox memory\n");
4048 return;
4049 }
4050 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
4051 mb = &pmb->u.mb;
4052
4053 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
4054 if (!vport_info) {
4055 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4056 "0543 lpfc_create_static_vport failed to"
4057 " allocate vport_info\n");
4058 mempool_free(pmb, phba->mbox_mem_pool);
4059 return;
4060 }
4061
4062 vport_buff = (uint8_t *) vport_info;
4063 do {
4064
4065 if (pmb->ctx_buf) {
4066 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
4067 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4068 kfree(mp);
4069 }
4070 if (lpfc_dump_static_vport(phba, pmb, offset))
4071 goto out;
4072
4073 pmb->vport = phba->pport;
4074 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
4075 LPFC_MBOX_TMO);
4076
4077 if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
4078 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4079 "0544 lpfc_create_static_vport failed to"
4080 " issue dump mailbox command ret 0x%x "
4081 "status 0x%x\n",
4082 mbx_wait_rc, mb->mbxStatus);
4083 goto out;
4084 }
4085
4086 if (phba->sli_rev == LPFC_SLI_REV4) {
4087 byte_count = pmb->u.mqe.un.mb_words[5];
4088 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
4089 if (byte_count > sizeof(struct static_vport_info) -
4090 offset)
4091 byte_count = sizeof(struct static_vport_info)
4092 - offset;
4093 memcpy(vport_buff + offset, mp->virt, byte_count);
4094 offset += byte_count;
4095 } else {
4096 if (mb->un.varDmp.word_cnt >
4097 sizeof(struct static_vport_info) - offset)
4098 mb->un.varDmp.word_cnt =
4099 sizeof(struct static_vport_info)
4100 - offset;
4101 byte_count = mb->un.varDmp.word_cnt;
4102 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
4103 vport_buff + offset,
4104 byte_count);
4105
4106 offset += byte_count;
4107 }
4108
4109 } while (byte_count &&
4110 offset < sizeof(struct static_vport_info));
4111
4112
4113 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
4114 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
4115 != VPORT_INFO_REV)) {
4116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4117 "0545 lpfc_create_static_vport bad"
4118 " information header 0x%x 0x%x\n",
4119 le32_to_cpu(vport_info->signature),
4120 le32_to_cpu(vport_info->rev) &
4121 VPORT_INFO_REV_MASK);
4122
4123 goto out;
4124 }
4125
4126 shost = lpfc_shost_from_vport(phba->pport);
4127
4128 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
4129 memset(&vport_id, 0, sizeof(vport_id));
4130 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
4131 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
4132 if (!vport_id.port_name || !vport_id.node_name)
4133 continue;
4134
4135 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
4136 vport_id.vport_type = FC_PORTTYPE_NPIV;
4137 vport_id.disable = false;
4138 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
4139
4140 if (!new_fc_vport) {
4141 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4142 "0546 lpfc_create_static_vport failed to"
4143 " create vport\n");
4144 continue;
4145 }
4146
4147 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
4148 vport->vport_flag |= STATIC_VPORT;
4149 }
4150
4151out:
4152 kfree(vport_info);
4153 if (mbx_wait_rc != MBX_TIMEOUT) {
4154 if (pmb->ctx_buf) {
4155 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
4156 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4157 kfree(mp);
4158 }
4159 mempool_free(pmb, phba->mbox_mem_pool);
4160 }
4161
4162 return;
4163}
4164
4165
4166
4167
4168
4169
4170
4171void
4172lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4173{
4174 struct lpfc_vport *vport = pmb->vport;
4175 MAILBOX_t *mb = &pmb->u.mb;
4176 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
4177 struct lpfc_nodelist *ndlp;
4178 struct Scsi_Host *shost;
4179
4180 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4181 pmb->ctx_ndlp = NULL;
4182 pmb->ctx_buf = NULL;
4183
4184 if (mb->mbxStatus) {
4185 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4186 "0258 Register Fabric login error: 0x%x\n",
4187 mb->mbxStatus);
4188 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4189 kfree(mp);
4190 mempool_free(pmb, phba->mbox_mem_pool);
4191
4192 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4193
4194 lpfc_disc_list_loopmap(vport);
4195
4196
4197 lpfc_disc_start(vport);
4198
4199
4200
4201 lpfc_nlp_put(ndlp);
4202 return;
4203 }
4204
4205 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4206
4207
4208
4209 lpfc_nlp_put(ndlp);
4210 return;
4211 }
4212
4213 if (phba->sli_rev < LPFC_SLI_REV4)
4214 ndlp->nlp_rpi = mb->un.varWords[0];
4215 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4216 ndlp->nlp_type |= NLP_FABRIC;
4217 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4218
4219 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
4220
4221
4222 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
4223 lpfc_start_fdiscs(phba);
4224 else {
4225 shost = lpfc_shost_from_vport(vport);
4226 spin_lock_irq(shost->host_lock);
4227 vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
4228 spin_unlock_irq(shost->host_lock);
4229 }
4230 lpfc_do_scr_ns_plogi(phba, vport);
4231 }
4232
4233 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4234 kfree(mp);
4235 mempool_free(pmb, phba->mbox_mem_pool);
4236
4237
4238
4239
4240 lpfc_nlp_put(ndlp);
4241 return;
4242}
4243
4244
4245
4246
4247
4248int
4249lpfc_issue_gidft(struct lpfc_vport *vport)
4250{
4251
4252 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4253 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
4254 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
4255
4256
4257
4258 lpfc_printf_vlog(vport, KERN_ERR,
4259 LOG_TRACE_EVENT,
4260 "0604 %s FC TYPE %x %s\n",
4261 "Failed to issue GID_FT to ",
4262 FC_TYPE_FCP,
4263 "Finishing discovery.");
4264 return 0;
4265 }
4266 vport->gidft_inp++;
4267 }
4268
4269 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4270 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
4271 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
4272
4273
4274
4275 lpfc_printf_vlog(vport, KERN_ERR,
4276 LOG_TRACE_EVENT,
4277 "0605 %s FC_TYPE %x %s %d\n",
4278 "Failed to issue GID_FT to ",
4279 FC_TYPE_NVME,
4280 "Finishing discovery: gidftinp ",
4281 vport->gidft_inp);
4282 if (vport->gidft_inp == 0)
4283 return 0;
4284 } else
4285 vport->gidft_inp++;
4286 }
4287 return vport->gidft_inp;
4288}
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300int
4301lpfc_issue_gidpt(struct lpfc_vport *vport)
4302{
4303
4304 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
4305
4306
4307
4308 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4309 "0606 %s Port TYPE %x %s\n",
4310 "Failed to issue GID_PT to ",
4311 GID_PT_N_PORT,
4312 "Finishing discovery.");
4313 return 0;
4314 }
4315 vport->gidft_inp++;
4316 return 1;
4317}
4318
4319
4320
4321
4322
4323
4324
4325void
4326lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4327{
4328 MAILBOX_t *mb = &pmb->u.mb;
4329 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
4330 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4331 struct lpfc_vport *vport = pmb->vport;
4332 int rc;
4333
4334 pmb->ctx_buf = NULL;
4335 pmb->ctx_ndlp = NULL;
4336 vport->gidft_inp = 0;
4337
4338 if (mb->mbxStatus) {
4339 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4340 "0260 Register NameServer error: 0x%x\n",
4341 mb->mbxStatus);
4342
4343out:
4344
4345
4346
4347 lpfc_nlp_put(ndlp);
4348 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4349 kfree(mp);
4350 mempool_free(pmb, phba->mbox_mem_pool);
4351
4352
4353
4354
4355
4356 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
4357 spin_lock_irq(&ndlp->lock);
4358 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
4359 spin_unlock_irq(&ndlp->lock);
4360 lpfc_nlp_not_used(ndlp);
4361 }
4362
4363 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4364
4365
4366
4367
4368 lpfc_disc_list_loopmap(vport);
4369
4370
4371 lpfc_disc_start(vport);
4372 return;
4373 }
4374 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4375 return;
4376 }
4377
4378 if (phba->sli_rev < LPFC_SLI_REV4)
4379 ndlp->nlp_rpi = mb->un.varWords[0];
4380 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4381 ndlp->nlp_type |= NLP_FABRIC;
4382 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4383 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
4384 "0003 rpi:%x DID:%x flg:%x %d x%px\n",
4385 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4386 kref_read(&ndlp->kref),
4387 ndlp);
4388
4389 if (vport->port_state < LPFC_VPORT_READY) {
4390
4391 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
4392 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
4393 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
4394 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
4395
4396 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4397 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
4398 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
4399
4400 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4401 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
4402 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
4403 FC_TYPE_NVME);
4404
4405
4406 lpfc_issue_els_scr(vport, 0);
4407
4408
4409
4410
4411
4412
4413
4414 if (phba->cmf_active_mode != LPFC_CFG_OFF) {
4415 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin;
4416 phba->cgn_reg_signal = phba->cgn_init_reg_signal;
4417 rc = lpfc_issue_els_edc(vport, 0);
4418 lpfc_printf_log(phba, KERN_INFO,
4419 LOG_INIT | LOG_ELS | LOG_DISCOVERY,
4420 "4220 EDC issue error x%x, Data: x%x\n",
4421 rc, phba->cgn_init_reg_signal);
4422 } else {
4423 lpfc_issue_els_rdf(vport, 0);
4424 }
4425 }
4426
4427 vport->fc_ns_retry = 0;
4428 if (lpfc_issue_gidft(vport) == 0)
4429 goto out;
4430
4431
4432
4433
4434
4435
4436
4437
4438 lpfc_nlp_put(ndlp);
4439 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4440 kfree(mp);
4441 mempool_free(pmb, phba->mbox_mem_pool);
4442
4443 return;
4444}
4445
4446
4447
4448
4449
4450
4451void
4452lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4453{
4454 struct lpfc_vport *vport = pmb->vport;
4455 MAILBOX_t *mb = &pmb->u.mb;
4456 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
4457 struct lpfc_nodelist *ndlp;
4458
4459 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4460 pmb->ctx_ndlp = NULL;
4461 pmb->ctx_buf = NULL;
4462
4463 if (mb->mbxStatus) {
4464 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4465 "0933 %s: Register FC login error: 0x%x\n",
4466 __func__, mb->mbxStatus);
4467 goto out;
4468 }
4469
4470 lpfc_check_nlp_post_devloss(vport, ndlp);
4471
4472 if (phba->sli_rev < LPFC_SLI_REV4)
4473 ndlp->nlp_rpi = mb->un.varWords[0];
4474
4475 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4476 "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n",
4477 __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
4478 ndlp->nlp_state);
4479
4480 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4481 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
4482 ndlp->nlp_type |= NLP_FABRIC;
4483 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4484
4485 out:
4486 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4487 kfree(mp);
4488 mempool_free(pmb, phba->mbox_mem_pool);
4489
4490
4491
4492
4493 lpfc_nlp_put(ndlp);
4494}
4495
4496static void
4497lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4498{
4499 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4500 struct fc_rport *rport;
4501 struct lpfc_rport_data *rdata;
4502 struct fc_rport_identifiers rport_ids;
4503 struct lpfc_hba *phba = vport->phba;
4504 unsigned long flags;
4505
4506 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4507 return;
4508
4509
4510 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
4511 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
4512 rport_ids.port_id = ndlp->nlp_DID;
4513 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4514
4515
4516 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4517 "rport add: did:x%x flg:x%x type x%x",
4518 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4519
4520
4521 if (vport->load_flag & FC_UNLOADING)
4522 return;
4523
4524
4525
4526
4527 if (ndlp->rport) {
4528 rdata = ndlp->rport->dd_data;
4529 rdata->pnode = NULL;
4530 }
4531
4532 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
4533 if (!rport) {
4534 dev_printk(KERN_WARNING, &phba->pcidev->dev,
4535 "Warning: fc_remote_port_add failed\n");
4536 return;
4537 }
4538
4539
4540 rport->maxframe_size = ndlp->nlp_maxframe;
4541 rport->supported_classes = ndlp->nlp_class_sup;
4542 rdata = rport->dd_data;
4543 rdata->pnode = lpfc_nlp_get(ndlp);
4544 if (!rdata->pnode) {
4545 dev_warn(&phba->pcidev->dev,
4546 "Warning - node ref failed. Unreg rport\n");
4547 fc_remote_port_delete(rport);
4548 ndlp->rport = NULL;
4549 return;
4550 }
4551
4552 spin_lock_irqsave(&ndlp->lock, flags);
4553 ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
4554 spin_unlock_irqrestore(&ndlp->lock, flags);
4555
4556 if (ndlp->nlp_type & NLP_FCP_TARGET)
4557 rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
4558 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
4559 rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
4560 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
4561 rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
4562 if (ndlp->nlp_type & NLP_NVME_TARGET)
4563 rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
4564 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
4565 rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
4566
4567 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
4568 fc_remote_port_rolechg(rport, rport_ids.roles);
4569
4570 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4571 "3183 %s rport x%px DID x%x, role x%x refcnt %d\n",
4572 __func__, rport, rport->port_id, rport->roles,
4573 kref_read(&ndlp->kref));
4574
4575 if ((rport->scsi_target_id != -1) &&
4576 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
4577 ndlp->nlp_sid = rport->scsi_target_id;
4578 }
4579
4580 return;
4581}
4582
4583static void
4584lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
4585{
4586 struct fc_rport *rport = ndlp->rport;
4587 struct lpfc_vport *vport = ndlp->vport;
4588
4589 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4590 return;
4591
4592 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4593 "rport delete: did:x%x flg:x%x type x%x",
4594 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4595
4596 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4597 "3184 rport unregister x%06x, rport x%px "
4598 "xptflg x%x refcnt %d\n",
4599 ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags,
4600 kref_read(&ndlp->kref));
4601
4602 fc_remote_port_delete(rport);
4603 lpfc_nlp_put(ndlp);
4604}
4605
4606static void
4607lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
4608{
4609 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4610 unsigned long iflags;
4611
4612 spin_lock_irqsave(shost->host_lock, iflags);
4613 switch (state) {
4614 case NLP_STE_UNUSED_NODE:
4615 vport->fc_unused_cnt += count;
4616 break;
4617 case NLP_STE_PLOGI_ISSUE:
4618 vport->fc_plogi_cnt += count;
4619 break;
4620 case NLP_STE_ADISC_ISSUE:
4621 vport->fc_adisc_cnt += count;
4622 break;
4623 case NLP_STE_REG_LOGIN_ISSUE:
4624 vport->fc_reglogin_cnt += count;
4625 break;
4626 case NLP_STE_PRLI_ISSUE:
4627 vport->fc_prli_cnt += count;
4628 break;
4629 case NLP_STE_UNMAPPED_NODE:
4630 vport->fc_unmap_cnt += count;
4631 break;
4632 case NLP_STE_MAPPED_NODE:
4633 vport->fc_map_cnt += count;
4634 break;
4635 case NLP_STE_NPR_NODE:
4636 if (vport->fc_npr_cnt == 0 && count == -1)
4637 vport->fc_npr_cnt = 0;
4638 else
4639 vport->fc_npr_cnt += count;
4640 break;
4641 }
4642 spin_unlock_irqrestore(shost->host_lock, iflags);
4643}
4644
4645
4646void
4647lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4648{
4649 unsigned long iflags;
4650
4651 lpfc_check_nlp_post_devloss(vport, ndlp);
4652
4653 spin_lock_irqsave(&ndlp->lock, iflags);
4654 if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) {
4655
4656 spin_unlock_irqrestore(&ndlp->lock, iflags);
4657
4658 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD &&
4659 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) {
4660 lpfc_nvme_rescan_port(vport, ndlp);
4661 }
4662 return;
4663 }
4664
4665 ndlp->fc4_xpt_flags |= NLP_XPT_REGD;
4666 spin_unlock_irqrestore(&ndlp->lock, iflags);
4667
4668 if (lpfc_valid_xpt_node(ndlp)) {
4669 vport->phba->nport_event_cnt++;
4670
4671
4672
4673
4674 lpfc_register_remote_port(vport, ndlp);
4675 }
4676
4677
4678 if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME))
4679 return;
4680
4681
4682 if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
4683 ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4684 if (vport->phba->nvmet_support == 0) {
4685
4686
4687
4688
4689 if (ndlp->nlp_type & NLP_NVME_TARGET) {
4690 vport->phba->nport_event_cnt++;
4691 lpfc_nvme_register_port(vport, ndlp);
4692 }
4693 } else {
4694
4695
4696
4697 lpfc_nlp_get(ndlp);
4698 }
4699 }
4700}
4701
4702
4703void
4704lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4705{
4706 unsigned long iflags;
4707
4708 spin_lock_irqsave(&ndlp->lock, iflags);
4709 if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
4710 spin_unlock_irqrestore(&ndlp->lock, iflags);
4711 return;
4712 }
4713
4714 ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
4715 spin_unlock_irqrestore(&ndlp->lock, iflags);
4716
4717 if (ndlp->rport &&
4718 ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
4719 vport->phba->nport_event_cnt++;
4720 lpfc_unregister_remote_port(ndlp);
4721 }
4722
4723 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
4724 vport->phba->nport_event_cnt++;
4725 if (vport->phba->nvmet_support == 0) {
4726
4727 if (ndlp->nlp_type & NLP_NVME_TARGET)
4728 lpfc_nvme_unregister_port(vport, ndlp);
4729 } else {
4730
4731 lpfc_nlp_put(ndlp);
4732 }
4733 }
4734
4735}
4736
4737
4738
4739
4740static void
4741lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4742 int new_state)
4743{
4744 switch (new_state) {
4745
4746
4747
4748
4749 case NLP_STE_ADISC_ISSUE:
4750 break;
4751
4752
4753
4754
4755
4756
4757 case NLP_STE_UNMAPPED_NODE:
4758 ndlp->nlp_type |= NLP_FC_NODE;
4759 fallthrough;
4760 case NLP_STE_MAPPED_NODE:
4761 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4762 lpfc_nlp_reg_node(vport, ndlp);
4763 break;
4764
4765
4766
4767
4768
4769
4770
4771 case NLP_STE_NPR_NODE:
4772 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4773 fallthrough;
4774 default:
4775 lpfc_nlp_unreg_node(vport, ndlp);
4776 break;
4777 }
4778
4779}
4780
4781static void
4782lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4783 int old_state, int new_state)
4784{
4785
4786 if (new_state == NLP_STE_ADISC_ISSUE ||
4787 old_state == NLP_STE_ADISC_ISSUE) {
4788 lpfc_handle_adisc_state(vport, ndlp, new_state);
4789 return;
4790 }
4791
4792 if (new_state == NLP_STE_UNMAPPED_NODE) {
4793 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4794 ndlp->nlp_type |= NLP_FC_NODE;
4795 }
4796 if (new_state == NLP_STE_MAPPED_NODE)
4797 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4798 if (new_state == NLP_STE_NPR_NODE)
4799 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4800
4801
4802 if ((old_state == NLP_STE_MAPPED_NODE ||
4803 old_state == NLP_STE_UNMAPPED_NODE)) {
4804
4805
4806
4807 if (!(ndlp->nlp_flag & NLP_NPR_ADISC) ||
4808 !lpfc_is_link_up(vport->phba))
4809 lpfc_nlp_unreg_node(vport, ndlp);
4810 }
4811
4812 if (new_state == NLP_STE_MAPPED_NODE ||
4813 new_state == NLP_STE_UNMAPPED_NODE)
4814 lpfc_nlp_reg_node(vport, ndlp);
4815
4816 if ((new_state == NLP_STE_MAPPED_NODE) &&
4817 (vport->stat_data_enabled)) {
4818
4819
4820
4821
4822 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
4823 sizeof(struct lpfc_scsicmd_bkt),
4824 GFP_KERNEL);
4825
4826 if (!ndlp->lat_data)
4827 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4828 "0286 lpfc_nlp_state_cleanup failed to "
4829 "allocate statistical data buffer DID "
4830 "0x%x\n", ndlp->nlp_DID);
4831 }
4832
4833
4834
4835
4836
4837
4838 if ((new_state == NLP_STE_MAPPED_NODE) &&
4839 (ndlp->nlp_type & NLP_FCP_TARGET) &&
4840 (!ndlp->rport ||
4841 ndlp->rport->scsi_target_id == -1 ||
4842 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4843 spin_lock_irq(&ndlp->lock);
4844 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
4845 spin_unlock_irq(&ndlp->lock);
4846 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4847 }
4848}
4849
4850static char *
4851lpfc_nlp_state_name(char *buffer, size_t size, int state)
4852{
4853 static char *states[] = {
4854 [NLP_STE_UNUSED_NODE] = "UNUSED",
4855 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
4856 [NLP_STE_ADISC_ISSUE] = "ADISC",
4857 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
4858 [NLP_STE_PRLI_ISSUE] = "PRLI",
4859 [NLP_STE_LOGO_ISSUE] = "LOGO",
4860 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
4861 [NLP_STE_MAPPED_NODE] = "MAPPED",
4862 [NLP_STE_NPR_NODE] = "NPR",
4863 };
4864
4865 if (state < NLP_STE_MAX_STATE && states[state])
4866 strlcpy(buffer, states[state], size);
4867 else
4868 snprintf(buffer, size, "unknown (%d)", state);
4869 return buffer;
4870}
4871
4872void
4873lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4874 int state)
4875{
4876 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4877 int old_state = ndlp->nlp_state;
4878 int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
4879 char name1[16], name2[16];
4880
4881 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4882 "0904 NPort state transition x%06x, %s -> %s\n",
4883 ndlp->nlp_DID,
4884 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4885 lpfc_nlp_state_name(name2, sizeof(name2), state));
4886
4887 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4888 "node statechg did:x%x old:%d ste:%d",
4889 ndlp->nlp_DID, old_state, state);
4890
4891 if (node_dropped && old_state == NLP_STE_UNUSED_NODE &&
4892 state != NLP_STE_UNUSED_NODE) {
4893 ndlp->nlp_flag &= ~NLP_DROPPED;
4894 lpfc_nlp_get(ndlp);
4895 }
4896
4897 if (old_state == NLP_STE_NPR_NODE &&
4898 state != NLP_STE_NPR_NODE)
4899 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4900 if (old_state == NLP_STE_UNMAPPED_NODE) {
4901 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4902 ndlp->nlp_type &= ~NLP_FC_NODE;
4903 }
4904
4905 if (list_empty(&ndlp->nlp_listp)) {
4906 spin_lock_irq(shost->host_lock);
4907 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4908 spin_unlock_irq(shost->host_lock);
4909 } else if (old_state)
4910 lpfc_nlp_counters(vport, old_state, -1);
4911
4912 ndlp->nlp_state = state;
4913 lpfc_nlp_counters(vport, state, 1);
4914 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4915}
4916
4917void
4918lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4919{
4920 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4921
4922 if (list_empty(&ndlp->nlp_listp)) {
4923 spin_lock_irq(shost->host_lock);
4924 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4925 spin_unlock_irq(shost->host_lock);
4926 }
4927}
4928
4929void
4930lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4931{
4932 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4933
4934 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4935 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4936 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4937 spin_lock_irq(shost->host_lock);
4938 list_del_init(&ndlp->nlp_listp);
4939 spin_unlock_irq(shost->host_lock);
4940 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4941 NLP_STE_UNUSED_NODE);
4942}
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952
4953
4954
4955
4956
4957
4958static inline void
4959lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4960 uint32_t did)
4961{
4962 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4963 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4964 timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
4965 INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
4966
4967 ndlp->nlp_DID = did;
4968 ndlp->vport = vport;
4969 ndlp->phba = vport->phba;
4970 ndlp->nlp_sid = NLP_NO_SID;
4971 ndlp->nlp_fc4_type = NLP_FC4_NONE;
4972 kref_init(&ndlp->kref);
4973 atomic_set(&ndlp->cmd_pending, 0);
4974 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4975 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4976}
4977
4978void
4979lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4980{
4981
4982
4983
4984
4985
4986
4987
4988 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4989 return;
4990 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4991 ndlp->nlp_flag |= NLP_DROPPED;
4992 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4993 lpfc_cleanup_vports_rrqs(vport, ndlp);
4994 lpfc_unreg_rpi(vport, ndlp);
4995 }
4996
4997 lpfc_nlp_put(ndlp);
4998 return;
4999}
5000
5001
5002
5003
5004void
5005lpfc_set_disctmo(struct lpfc_vport *vport)
5006{
5007 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5008 struct lpfc_hba *phba = vport->phba;
5009 uint32_t tmo;
5010
5011 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
5012
5013 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
5014 } else {
5015
5016
5017
5018 tmo = ((phba->fc_ratov * 3) + 3);
5019 }
5020
5021
5022 if (!timer_pending(&vport->fc_disctmo)) {
5023 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5024 "set disc timer: tmo:x%x state:x%x flg:x%x",
5025 tmo, vport->port_state, vport->fc_flag);
5026 }
5027
5028 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
5029 spin_lock_irq(shost->host_lock);
5030 vport->fc_flag |= FC_DISC_TMO;
5031 spin_unlock_irq(shost->host_lock);
5032
5033
5034 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5035 "0247 Start Discovery Timer state x%x "
5036 "Data: x%x x%lx x%x x%x\n",
5037 vport->port_state, tmo,
5038 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
5039 vport->fc_adisc_cnt);
5040
5041 return;
5042}
5043
5044
5045
5046
5047int
5048lpfc_can_disctmo(struct lpfc_vport *vport)
5049{
5050 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5051 unsigned long iflags;
5052
5053 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5054 "can disc timer: state:x%x rtry:x%x flg:x%x",
5055 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5056
5057
5058 if (vport->fc_flag & FC_DISC_TMO) {
5059 spin_lock_irqsave(shost->host_lock, iflags);
5060 vport->fc_flag &= ~FC_DISC_TMO;
5061 spin_unlock_irqrestore(shost->host_lock, iflags);
5062 del_timer_sync(&vport->fc_disctmo);
5063 spin_lock_irqsave(&vport->work_port_lock, iflags);
5064 vport->work_port_events &= ~WORKER_DISC_TMO;
5065 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
5066 }
5067
5068
5069 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5070 "0248 Cancel Discovery Timer state x%x "
5071 "Data: x%x x%x x%x\n",
5072 vport->port_state, vport->fc_flag,
5073 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
5074 return 0;
5075}
5076
5077
5078
5079
5080
5081int
5082lpfc_check_sli_ndlp(struct lpfc_hba *phba,
5083 struct lpfc_sli_ring *pring,
5084 struct lpfc_iocbq *iocb,
5085 struct lpfc_nodelist *ndlp)
5086{
5087 IOCB_t *icmd = &iocb->iocb;
5088 struct lpfc_vport *vport = ndlp->vport;
5089
5090 if (iocb->vport != vport)
5091 return 0;
5092
5093 if (pring->ringno == LPFC_ELS_RING) {
5094 switch (icmd->ulpCommand) {
5095 case CMD_GEN_REQUEST64_CR:
5096 if (iocb->context_un.ndlp == ndlp)
5097 return 1;
5098 fallthrough;
5099 case CMD_ELS_REQUEST64_CR:
5100 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
5101 return 1;
5102 fallthrough;
5103 case CMD_XMIT_ELS_RSP64_CX:
5104 if (iocb->context1 == (uint8_t *) ndlp)
5105 return 1;
5106 }
5107 } else if (pring->ringno == LPFC_FCP_RING) {
5108
5109 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
5110 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
5111 return 0;
5112 }
5113 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
5114 return 1;
5115 }
5116 }
5117 return 0;
5118}
5119
5120static void
5121__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
5122 struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
5123 struct list_head *dequeue_list)
5124{
5125 struct lpfc_iocbq *iocb, *next_iocb;
5126
5127 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
5128
5129 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
5130
5131 list_move_tail(&iocb->list, dequeue_list);
5132 }
5133}
5134
5135static void
5136lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
5137 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
5138{
5139 struct lpfc_sli *psli = &phba->sli;
5140 uint32_t i;
5141
5142 spin_lock_irq(&phba->hbalock);
5143 for (i = 0; i < psli->num_rings; i++)
5144 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
5145 dequeue_list);
5146 spin_unlock_irq(&phba->hbalock);
5147}
5148
5149static void
5150lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
5151 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
5152{
5153 struct lpfc_sli_ring *pring;
5154 struct lpfc_queue *qp = NULL;
5155
5156 spin_lock_irq(&phba->hbalock);
5157 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
5158 pring = qp->pring;
5159 if (!pring)
5160 continue;
5161 spin_lock(&pring->ring_lock);
5162 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
5163 spin_unlock(&pring->ring_lock);
5164 }
5165 spin_unlock_irq(&phba->hbalock);
5166}
5167
5168
5169
5170
5171
5172static int
5173lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5174{
5175 LIST_HEAD(completions);
5176
5177 lpfc_fabric_abort_nport(ndlp);
5178
5179
5180
5181
5182
5183 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5184 if (phba->sli_rev != LPFC_SLI_REV4)
5185 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
5186 else
5187 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
5188 }
5189
5190
5191 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5192 IOERR_SLI_ABORTED);
5193
5194 return 0;
5195}
5196
5197
5198
5199
5200
5201
5202
5203
5204
5205static void
5206lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5207{
5208 struct lpfc_vport *vport = pmb->vport;
5209 struct lpfc_nodelist *ndlp;
5210
5211 ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
5212 if (!ndlp)
5213 return;
5214 lpfc_issue_els_logo(vport, ndlp, 0);
5215 mempool_free(pmb, phba->mbox_mem_pool);
5216
5217
5218 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
5219 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
5220 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5221 "1434 UNREG cmpl deferred logo x%x "
5222 "on NPort x%x Data: x%x x%px\n",
5223 ndlp->nlp_rpi, ndlp->nlp_DID,
5224 ndlp->nlp_defer_did, ndlp);
5225
5226 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5227 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
5228 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5229 } else {
5230
5231 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
5232 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
5233 spin_lock_irq(&ndlp->lock);
5234 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
5235 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
5236 spin_unlock_irq(&ndlp->lock);
5237 }
5238 spin_lock_irq(&ndlp->lock);
5239 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5240 spin_unlock_irq(&ndlp->lock);
5241 }
5242}
5243
5244
5245
5246
5247
5248
5249static void
5250lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
5251 struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
5252{
5253 unsigned long iflags;
5254
5255
5256
5257
5258 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
5259 if (!mbox->ctx_ndlp)
5260 return;
5261
5262 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
5263 mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
5264
5265 } else if (phba->sli_rev == LPFC_SLI_REV4 &&
5266 (!(vport->load_flag & FC_UNLOADING)) &&
5267 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
5268 LPFC_SLI_INTF_IF_TYPE_2) &&
5269 (kref_read(&ndlp->kref) > 0)) {
5270 mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
5271 } else {
5272 if (vport->load_flag & FC_UNLOADING) {
5273 if (phba->sli_rev == LPFC_SLI_REV4) {
5274 spin_lock_irqsave(&ndlp->lock, iflags);
5275 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5276 spin_unlock_irqrestore(&ndlp->lock, iflags);
5277 }
5278 }
5279 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5280 }
5281}
5282
5283
5284
5285
5286
5287
5288
5289
5290
5291
5292int
5293lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5294{
5295 struct lpfc_hba *phba = vport->phba;
5296 LPFC_MBOXQ_t *mbox;
5297 int rc, acc_plogi = 1;
5298 uint16_t rpi;
5299
5300 if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
5301 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
5302 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
5303 lpfc_printf_vlog(vport, KERN_INFO,
5304 LOG_NODE | LOG_DISCOVERY,
5305 "3366 RPI x%x needs to be "
5306 "unregistered nlp_flag x%x "
5307 "did x%x\n",
5308 ndlp->nlp_rpi, ndlp->nlp_flag,
5309 ndlp->nlp_DID);
5310
5311
5312
5313
5314 if (ndlp->nlp_flag & NLP_UNREG_INP) {
5315 lpfc_printf_vlog(vport, KERN_INFO,
5316 LOG_NODE | LOG_DISCOVERY,
5317 "1436 unreg_rpi SKIP UNREG x%x on "
5318 "NPort x%x deferred x%x flg x%x "
5319 "Data: x%px\n",
5320 ndlp->nlp_rpi, ndlp->nlp_DID,
5321 ndlp->nlp_defer_did,
5322 ndlp->nlp_flag, ndlp);
5323 goto out;
5324 }
5325
5326 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5327 if (mbox) {
5328
5329 rpi = ndlp->nlp_rpi;
5330 if (phba->sli_rev == LPFC_SLI_REV4)
5331 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
5332
5333 lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
5334 mbox->vport = vport;
5335 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
5336 if (!mbox->ctx_ndlp) {
5337 mempool_free(mbox, phba->mbox_mem_pool);
5338 return 1;
5339 }
5340
5341 if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
5342
5343
5344
5345 acc_plogi = 0;
5346 if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
5347 Fabric_DID_MASK) &&
5348 (!(vport->fc_flag & FC_OFFLINE_MODE)))
5349 ndlp->nlp_flag |= NLP_UNREG_INP;
5350
5351 lpfc_printf_vlog(vport, KERN_INFO,
5352 LOG_NODE | LOG_DISCOVERY,
5353 "1433 unreg_rpi UNREG x%x on "
5354 "NPort x%x deferred flg x%x "
5355 "Data:x%px\n",
5356 ndlp->nlp_rpi, ndlp->nlp_DID,
5357 ndlp->nlp_flag, ndlp);
5358
5359 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5360 if (rc == MBX_NOT_FINISHED) {
5361 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5362 mempool_free(mbox, phba->mbox_mem_pool);
5363 acc_plogi = 1;
5364 }
5365 } else {
5366 lpfc_printf_vlog(vport, KERN_INFO,
5367 LOG_NODE | LOG_DISCOVERY,
5368 "1444 Failed to allocate mempool "
5369 "unreg_rpi UNREG x%x, "
5370 "DID x%x, flag x%x, "
5371 "ndlp x%px\n",
5372 ndlp->nlp_rpi, ndlp->nlp_DID,
5373 ndlp->nlp_flag, ndlp);
5374
5375
5376
5377
5378
5379 if (!(vport->load_flag & FC_UNLOADING)) {
5380 ndlp->nlp_flag &= ~NLP_UNREG_INP;
5381 lpfc_issue_els_logo(vport, ndlp, 0);
5382 ndlp->nlp_prev_state = ndlp->nlp_state;
5383 lpfc_nlp_set_state(vport, ndlp,
5384 NLP_STE_NPR_NODE);
5385 }
5386
5387 return 1;
5388 }
5389 lpfc_no_rpi(phba, ndlp);
5390out:
5391 if (phba->sli_rev != LPFC_SLI_REV4)
5392 ndlp->nlp_rpi = 0;
5393 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
5394 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5395 if (acc_plogi)
5396 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5397 return 1;
5398 }
5399 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5400 return 0;
5401}
5402
5403
5404
5405
5406
5407
5408
5409
5410void
5411lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
5412{
5413 struct lpfc_vport **vports;
5414 struct lpfc_nodelist *ndlp;
5415 struct Scsi_Host *shost;
5416 int i;
5417
5418 vports = lpfc_create_vport_work_array(phba);
5419 if (!vports) {
5420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5421 "2884 Vport array allocation failed \n");
5422 return;
5423 }
5424 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5425 shost = lpfc_shost_from_vport(vports[i]);
5426 spin_lock_irq(shost->host_lock);
5427 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5428 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5429
5430 spin_unlock_irq(shost->host_lock);
5431 lpfc_unreg_rpi(vports[i], ndlp);
5432 spin_lock_irq(shost->host_lock);
5433 }
5434 }
5435 spin_unlock_irq(shost->host_lock);
5436 }
5437 lpfc_destroy_vport_work_array(phba, vports);
5438}
5439
5440void
5441lpfc_unreg_all_rpis(struct lpfc_vport *vport)
5442{
5443 struct lpfc_hba *phba = vport->phba;
5444 LPFC_MBOXQ_t *mbox;
5445 int rc;
5446
5447 if (phba->sli_rev == LPFC_SLI_REV4) {
5448 lpfc_sli4_unreg_all_rpis(vport);
5449 return;
5450 }
5451
5452 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5453 if (mbox) {
5454 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
5455 mbox);
5456 mbox->vport = vport;
5457 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5458 mbox->ctx_ndlp = NULL;
5459 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
5460 if (rc != MBX_TIMEOUT)
5461 mempool_free(mbox, phba->mbox_mem_pool);
5462
5463 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5464 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5465 "1836 Could not issue "
5466 "unreg_login(all_rpis) status %d\n",
5467 rc);
5468 }
5469}
5470
5471void
5472lpfc_unreg_default_rpis(struct lpfc_vport *vport)
5473{
5474 struct lpfc_hba *phba = vport->phba;
5475 LPFC_MBOXQ_t *mbox;
5476 int rc;
5477
5478
5479 if (phba->sli_rev > LPFC_SLI_REV3)
5480 return;
5481
5482 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5483 if (mbox) {
5484 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
5485 mbox);
5486 mbox->vport = vport;
5487 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5488 mbox->ctx_ndlp = NULL;
5489 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
5490 if (rc != MBX_TIMEOUT)
5491 mempool_free(mbox, phba->mbox_mem_pool);
5492
5493 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5494 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5495 "1815 Could not issue "
5496 "unreg_did (default rpis) status %d\n",
5497 rc);
5498 }
5499}
5500
5501
5502
5503
5504
5505static int
5506lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5507{
5508 struct lpfc_hba *phba = vport->phba;
5509 LPFC_MBOXQ_t *mb, *nextmb;
5510 struct lpfc_dmabuf *mp;
5511
5512
5513 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5514 "0900 Cleanup node for NPort x%x "
5515 "Data: x%x x%x x%x\n",
5516 ndlp->nlp_DID, ndlp->nlp_flag,
5517 ndlp->nlp_state, ndlp->nlp_rpi);
5518 lpfc_dequeue_node(vport, ndlp);
5519
5520
5521
5522
5523 if ((mb = phba->sli.mbox_active)) {
5524 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
5525 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
5526 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5527 mb->ctx_ndlp = NULL;
5528 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5529 }
5530 }
5531
5532 spin_lock_irq(&phba->hbalock);
5533
5534 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
5535 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
5536 (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
5537 (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
5538 continue;
5539
5540 mb->ctx_ndlp = NULL;
5541 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5542 }
5543
5544 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
5545 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
5546 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
5547 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5548 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
5549 if (mp) {
5550 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
5551 kfree(mp);
5552 }
5553 list_del(&mb->list);
5554 mempool_free(mb, phba->mbox_mem_pool);
5555
5556
5557
5558
5559 }
5560 }
5561 spin_unlock_irq(&phba->hbalock);
5562
5563 lpfc_els_abort(phba, ndlp);
5564
5565 spin_lock_irq(&ndlp->lock);
5566 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
5567 spin_unlock_irq(&ndlp->lock);
5568
5569 ndlp->nlp_last_elscmd = 0;
5570 del_timer_sync(&ndlp->nlp_delayfunc);
5571
5572 list_del_init(&ndlp->els_retry_evt.evt_listp);
5573 list_del_init(&ndlp->dev_loss_evt.evt_listp);
5574 list_del_init(&ndlp->recovery_evt.evt_listp);
5575 lpfc_cleanup_vports_rrqs(vport, ndlp);
5576
5577 if (phba->sli_rev == LPFC_SLI_REV4)
5578 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5579
5580 return 0;
5581}
5582
5583static int
5584lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5585 uint32_t did)
5586{
5587 D_ID mydid, ndlpdid, matchdid;
5588
5589 if (did == Bcast_DID)
5590 return 0;
5591
5592
5593 if (ndlp->nlp_DID == did)
5594 return 1;
5595
5596
5597 mydid.un.word = vport->fc_myDID;
5598 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
5599 return 0;
5600 }
5601
5602 matchdid.un.word = did;
5603 ndlpdid.un.word = ndlp->nlp_DID;
5604 if (matchdid.un.b.id == ndlpdid.un.b.id) {
5605 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
5606 (mydid.un.b.area == matchdid.un.b.area)) {
5607
5608
5609
5610
5611
5612
5613
5614
5615 if ((ndlpdid.un.b.domain == 0) &&
5616 (ndlpdid.un.b.area == 0)) {
5617 if (ndlpdid.un.b.id &&
5618 vport->phba->fc_topology ==
5619 LPFC_TOPOLOGY_LOOP)
5620 return 1;
5621 }
5622 return 0;
5623 }
5624
5625 matchdid.un.word = ndlp->nlp_DID;
5626 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
5627 (mydid.un.b.area == ndlpdid.un.b.area)) {
5628 if ((matchdid.un.b.domain == 0) &&
5629 (matchdid.un.b.area == 0)) {
5630 if (matchdid.un.b.id)
5631 return 1;
5632 }
5633 }
5634 }
5635 return 0;
5636}
5637
5638
5639static struct lpfc_nodelist *
5640__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5641{
5642 struct lpfc_nodelist *ndlp;
5643 uint32_t data1;
5644
5645 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5646 if (lpfc_matchdid(vport, ndlp, did)) {
5647 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5648 ((uint32_t)ndlp->nlp_xri << 16) |
5649 ((uint32_t)ndlp->nlp_type << 8)
5650 );
5651 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5652 "0929 FIND node DID "
5653 "Data: x%px x%x x%x x%x x%x x%px\n",
5654 ndlp, ndlp->nlp_DID,
5655 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
5656 ndlp->active_rrqs_xri_bitmap);
5657 return ndlp;
5658 }
5659 }
5660
5661
5662 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5663 "0932 FIND node did x%x NOT FOUND.\n", did);
5664 return NULL;
5665}
5666
5667struct lpfc_nodelist *
5668lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5669{
5670 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5671 struct lpfc_nodelist *ndlp;
5672 unsigned long iflags;
5673
5674 spin_lock_irqsave(shost->host_lock, iflags);
5675 ndlp = __lpfc_findnode_did(vport, did);
5676 spin_unlock_irqrestore(shost->host_lock, iflags);
5677 return ndlp;
5678}
5679
5680struct lpfc_nodelist *
5681lpfc_findnode_mapped(struct lpfc_vport *vport)
5682{
5683 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5684 struct lpfc_nodelist *ndlp;
5685 uint32_t data1;
5686 unsigned long iflags;
5687
5688 spin_lock_irqsave(shost->host_lock, iflags);
5689
5690 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5691 if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
5692 ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
5693 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5694 ((uint32_t)ndlp->nlp_xri << 16) |
5695 ((uint32_t)ndlp->nlp_type << 8) |
5696 ((uint32_t)ndlp->nlp_rpi & 0xff));
5697 spin_unlock_irqrestore(shost->host_lock, iflags);
5698 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5699 "2025 FIND node DID "
5700 "Data: x%px x%x x%x x%x x%px\n",
5701 ndlp, ndlp->nlp_DID,
5702 ndlp->nlp_flag, data1,
5703 ndlp->active_rrqs_xri_bitmap);
5704 return ndlp;
5705 }
5706 }
5707 spin_unlock_irqrestore(shost->host_lock, iflags);
5708
5709
5710 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5711 "2026 FIND mapped did NOT FOUND.\n");
5712 return NULL;
5713}
5714
5715struct lpfc_nodelist *
5716lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
5717{
5718 struct lpfc_nodelist *ndlp;
5719
5720 ndlp = lpfc_findnode_did(vport, did);
5721 if (!ndlp) {
5722 if (vport->phba->nvmet_support)
5723 return NULL;
5724 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
5725 lpfc_rscn_payload_check(vport, did) == 0)
5726 return NULL;
5727 ndlp = lpfc_nlp_init(vport, did);
5728 if (!ndlp)
5729 return NULL;
5730 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5731
5732 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5733 "6453 Setup New Node 2B_DISC x%x "
5734 "Data:x%x x%x x%x\n",
5735 ndlp->nlp_DID, ndlp->nlp_flag,
5736 ndlp->nlp_state, vport->fc_flag);
5737
5738 spin_lock_irq(&ndlp->lock);
5739 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5740 spin_unlock_irq(&ndlp->lock);
5741 return ndlp;
5742 }
5743
5744
5745
5746
5747
5748 if ((vport->fc_flag & FC_RSCN_MODE) &&
5749 !(vport->fc_flag & FC_NDISC_ACTIVE)) {
5750 if (lpfc_rscn_payload_check(vport, did)) {
5751
5752
5753
5754
5755 lpfc_cancel_retry_delay_tmo(vport, ndlp);
5756
5757 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5758 "6455 Setup RSCN Node 2B_DISC x%x "
5759 "Data:x%x x%x x%x\n",
5760 ndlp->nlp_DID, ndlp->nlp_flag,
5761 ndlp->nlp_state, vport->fc_flag);
5762
5763
5764
5765
5766
5767
5768 if (vport->phba->nvmet_support)
5769 return ndlp;
5770
5771
5772
5773
5774 if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
5775 !(ndlp->nlp_type &
5776 (NLP_FCP_TARGET | NLP_NVME_TARGET)))
5777 return NULL;
5778
5779 ndlp->nlp_prev_state = ndlp->nlp_state;
5780 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5781
5782 spin_lock_irq(&ndlp->lock);
5783 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5784 spin_unlock_irq(&ndlp->lock);
5785 } else {
5786 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5787 "6456 Skip Setup RSCN Node x%x "
5788 "Data:x%x x%x x%x\n",
5789 ndlp->nlp_DID, ndlp->nlp_flag,
5790 ndlp->nlp_state, vport->fc_flag);
5791 ndlp = NULL;
5792 }
5793 } else {
5794 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5795 "6457 Setup Active Node 2B_DISC x%x "
5796 "Data:x%x x%x x%x\n",
5797 ndlp->nlp_DID, ndlp->nlp_flag,
5798 ndlp->nlp_state, vport->fc_flag);
5799
5800
5801
5802
5803
5804 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
5805 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5806 (!vport->phba->nvmet_support &&
5807 ndlp->nlp_flag & NLP_RCV_PLOGI))
5808 return NULL;
5809
5810 if (vport->phba->nvmet_support)
5811 return ndlp;
5812
5813
5814
5815
5816 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5817
5818 spin_lock_irq(&ndlp->lock);
5819 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5820 spin_unlock_irq(&ndlp->lock);
5821 }
5822 return ndlp;
5823}
5824
5825
5826void
5827lpfc_disc_list_loopmap(struct lpfc_vport *vport)
5828{
5829 struct lpfc_hba *phba = vport->phba;
5830 int j;
5831 uint32_t alpa, index;
5832
5833 if (!lpfc_is_link_up(phba))
5834 return;
5835
5836 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5837 return;
5838
5839
5840 if (phba->alpa_map[0]) {
5841 for (j = 1; j <= phba->alpa_map[0]; j++) {
5842 alpa = phba->alpa_map[j];
5843 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
5844 continue;
5845 lpfc_setup_disc_node(vport, alpa);
5846 }
5847 } else {
5848
5849 for (j = 0; j < FC_MAXLOOP; j++) {
5850
5851
5852
5853 if (vport->cfg_scan_down)
5854 index = j;
5855 else
5856 index = FC_MAXLOOP - j - 1;
5857 alpa = lpfcAlpaArray[index];
5858 if ((vport->fc_myDID & 0xff) == alpa)
5859 continue;
5860 lpfc_setup_disc_node(vport, alpa);
5861 }
5862 }
5863 return;
5864}
5865
5866
5867void
5868lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
5869{
5870 LPFC_MBOXQ_t *mbox;
5871 struct lpfc_sli *psli = &phba->sli;
5872 struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
5873 struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING];
5874 int rc;
5875
5876
5877
5878
5879
5880 if ((phba->link_state >= LPFC_CLEAR_LA) ||
5881 (vport->port_type != LPFC_PHYSICAL_PORT) ||
5882 (phba->sli_rev == LPFC_SLI_REV4))
5883 return;
5884
5885
5886 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
5887 phba->link_state = LPFC_CLEAR_LA;
5888 lpfc_clear_la(phba, mbox);
5889 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
5890 mbox->vport = vport;
5891 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5892 if (rc == MBX_NOT_FINISHED) {
5893 mempool_free(mbox, phba->mbox_mem_pool);
5894 lpfc_disc_flush_list(vport);
5895 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5896 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5897 phba->link_state = LPFC_HBA_ERROR;
5898 }
5899 }
5900}
5901
5902
5903void
5904lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5905{
5906 LPFC_MBOXQ_t *regvpimbox;
5907
5908 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5909 if (regvpimbox) {
5910 lpfc_reg_vpi(vport, regvpimbox);
5911 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
5912 regvpimbox->vport = vport;
5913 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
5914 == MBX_NOT_FINISHED) {
5915 mempool_free(regvpimbox, phba->mbox_mem_pool);
5916 }
5917 }
5918}
5919
5920
5921void
5922lpfc_disc_start(struct lpfc_vport *vport)
5923{
5924 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5925 struct lpfc_hba *phba = vport->phba;
5926 uint32_t num_sent;
5927 uint32_t clear_la_pending;
5928
5929 if (!lpfc_is_link_up(phba)) {
5930 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5931 "3315 Link is not up %x\n",
5932 phba->link_state);
5933 return;
5934 }
5935
5936 if (phba->link_state == LPFC_CLEAR_LA)
5937 clear_la_pending = 1;
5938 else
5939 clear_la_pending = 0;
5940
5941 if (vport->port_state < LPFC_VPORT_READY)
5942 vport->port_state = LPFC_DISC_AUTH;
5943
5944 lpfc_set_disctmo(vport);
5945
5946 vport->fc_prevDID = vport->fc_myDID;
5947 vport->num_disc_nodes = 0;
5948
5949
5950 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5951 "0202 Start Discovery port state x%x "
5952 "flg x%x Data: x%x x%x x%x\n",
5953 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
5954 vport->fc_adisc_cnt, vport->fc_npr_cnt);
5955
5956
5957 num_sent = lpfc_els_disc_adisc(vport);
5958
5959 if (num_sent)
5960 return;
5961
5962
5963 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5964 !(vport->fc_flag & FC_PT2PT) &&
5965 !(vport->fc_flag & FC_RSCN_MODE) &&
5966 (phba->sli_rev < LPFC_SLI_REV4)) {
5967 lpfc_issue_clear_la(phba, vport);
5968 lpfc_issue_reg_vpi(phba, vport);
5969 return;
5970 }
5971
5972
5973
5974
5975
5976 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
5977
5978 lpfc_issue_clear_la(phba, vport);
5979
5980 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
5981 vport->num_disc_nodes = 0;
5982
5983 if (vport->fc_npr_cnt)
5984 lpfc_els_disc_plogi(vport);
5985
5986 if (!vport->num_disc_nodes) {
5987 spin_lock_irq(shost->host_lock);
5988 vport->fc_flag &= ~FC_NDISC_ACTIVE;
5989 spin_unlock_irq(shost->host_lock);
5990 lpfc_can_disctmo(vport);
5991 }
5992 }
5993 vport->port_state = LPFC_VPORT_READY;
5994 } else {
5995
5996 num_sent = lpfc_els_disc_plogi(vport);
5997
5998 if (num_sent)
5999 return;
6000
6001 if (vport->fc_flag & FC_RSCN_MODE) {
6002
6003
6004
6005 if ((vport->fc_rscn_id_cnt == 0) &&
6006 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
6007 spin_lock_irq(shost->host_lock);
6008 vport->fc_flag &= ~FC_RSCN_MODE;
6009 spin_unlock_irq(shost->host_lock);
6010 lpfc_can_disctmo(vport);
6011 } else
6012 lpfc_els_handle_rscn(vport);
6013 }
6014 }
6015 return;
6016}
6017
6018
6019
6020
6021
6022static void
6023lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
6024{
6025 LIST_HEAD(completions);
6026 IOCB_t *icmd;
6027 struct lpfc_iocbq *iocb, *next_iocb;
6028 struct lpfc_sli_ring *pring;
6029
6030 pring = lpfc_phba_elsring(phba);
6031 if (unlikely(!pring))
6032 return;
6033
6034
6035
6036
6037 spin_lock_irq(&phba->hbalock);
6038 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
6039 if (iocb->context1 != ndlp) {
6040 continue;
6041 }
6042 icmd = &iocb->iocb;
6043 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
6044 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
6045
6046 list_move_tail(&iocb->list, &completions);
6047 }
6048 }
6049
6050
6051 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
6052 if (iocb->context1 != ndlp) {
6053 continue;
6054 }
6055 icmd = &iocb->iocb;
6056 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
6057 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
6058 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
6059 }
6060 }
6061 spin_unlock_irq(&phba->hbalock);
6062
6063
6064 lpfc_issue_hb_tmo(phba);
6065
6066
6067 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6068 IOERR_SLI_ABORTED);
6069}
6070
6071static void
6072lpfc_disc_flush_list(struct lpfc_vport *vport)
6073{
6074 struct lpfc_nodelist *ndlp, *next_ndlp;
6075 struct lpfc_hba *phba = vport->phba;
6076
6077 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
6078 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6079 nlp_listp) {
6080 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
6081 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
6082 lpfc_free_tx(phba, ndlp);
6083 }
6084 }
6085 }
6086}
6087
6088void
6089lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
6090{
6091 lpfc_els_flush_rscn(vport);
6092 lpfc_els_flush_cmd(vport);
6093 lpfc_disc_flush_list(vport);
6094}
6095
6096
6097
6098
6099
6100
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111void
6112lpfc_disc_timeout(struct timer_list *t)
6113{
6114 struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
6115 struct lpfc_hba *phba = vport->phba;
6116 uint32_t tmo_posted;
6117 unsigned long flags = 0;
6118
6119 if (unlikely(!phba))
6120 return;
6121
6122 spin_lock_irqsave(&vport->work_port_lock, flags);
6123 tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
6124 if (!tmo_posted)
6125 vport->work_port_events |= WORKER_DISC_TMO;
6126 spin_unlock_irqrestore(&vport->work_port_lock, flags);
6127
6128 if (!tmo_posted)
6129 lpfc_worker_wake_up(phba);
6130 return;
6131}
6132
6133static void
6134lpfc_disc_timeout_handler(struct lpfc_vport *vport)
6135{
6136 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6137 struct lpfc_hba *phba = vport->phba;
6138 struct lpfc_sli *psli = &phba->sli;
6139 struct lpfc_nodelist *ndlp, *next_ndlp;
6140 LPFC_MBOXQ_t *initlinkmbox;
6141 int rc, clrlaerr = 0;
6142
6143 if (!(vport->fc_flag & FC_DISC_TMO))
6144 return;
6145
6146 spin_lock_irq(shost->host_lock);
6147 vport->fc_flag &= ~FC_DISC_TMO;
6148 spin_unlock_irq(shost->host_lock);
6149
6150 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6151 "disc timeout: state:x%x rtry:x%x flg:x%x",
6152 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
6153
6154 switch (vport->port_state) {
6155
6156 case LPFC_LOCAL_CFG_LINK:
6157
6158
6159
6160
6161 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
6162 "0221 FAN timeout\n");
6163
6164
6165 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
6166 nlp_listp) {
6167 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
6168 continue;
6169 if (ndlp->nlp_type & NLP_FABRIC) {
6170
6171 lpfc_drop_node(vport, ndlp);
6172
6173 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
6174
6175
6176
6177 lpfc_unreg_rpi(vport, ndlp);
6178 }
6179 }
6180 if (vport->port_state != LPFC_FLOGI) {
6181 if (phba->sli_rev <= LPFC_SLI_REV3)
6182 lpfc_initial_flogi(vport);
6183 else
6184 lpfc_issue_init_vfi(vport);
6185 return;
6186 }
6187 break;
6188
6189 case LPFC_FDISC:
6190 case LPFC_FLOGI:
6191
6192
6193 lpfc_printf_vlog(vport, KERN_ERR,
6194 LOG_TRACE_EVENT,
6195 "0222 Initial %s timeout\n",
6196 vport->vpi ? "FDISC" : "FLOGI");
6197
6198
6199
6200
6201
6202
6203 lpfc_disc_list_loopmap(vport);
6204
6205
6206 lpfc_disc_start(vport);
6207 break;
6208
6209 case LPFC_FABRIC_CFG_LINK:
6210
6211
6212 lpfc_printf_vlog(vport, KERN_ERR,
6213 LOG_TRACE_EVENT,
6214 "0223 Timeout while waiting for "
6215 "NameServer login\n");
6216
6217 ndlp = lpfc_findnode_did(vport, NameServer_DID);
6218 if (ndlp)
6219 lpfc_els_abort(phba, ndlp);
6220
6221
6222 goto restart_disc;
6223
6224 case LPFC_NS_QRY:
6225
6226 lpfc_printf_vlog(vport, KERN_ERR,
6227 LOG_TRACE_EVENT,
6228 "0224 NameServer Query timeout "
6229 "Data: x%x x%x\n",
6230 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
6231
6232 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
6233
6234 vport->fc_ns_retry++;
6235 vport->gidft_inp = 0;
6236 rc = lpfc_issue_gidft(vport);
6237 if (rc == 0)
6238 break;
6239 }
6240 vport->fc_ns_retry = 0;
6241
6242restart_disc:
6243
6244
6245
6246
6247
6248 if (phba->sli_rev < LPFC_SLI_REV4) {
6249 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
6250 lpfc_issue_reg_vpi(phba, vport);
6251 else {
6252 lpfc_issue_clear_la(phba, vport);
6253 vport->port_state = LPFC_VPORT_READY;
6254 }
6255 }
6256
6257
6258 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6259 if (!initlinkmbox) {
6260 lpfc_printf_vlog(vport, KERN_ERR,
6261 LOG_TRACE_EVENT,
6262 "0206 Device Discovery "
6263 "completion error\n");
6264 phba->link_state = LPFC_HBA_ERROR;
6265 break;
6266 }
6267
6268 lpfc_linkdown(phba);
6269 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
6270 phba->cfg_link_speed);
6271 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
6272 initlinkmbox->vport = vport;
6273 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6274 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
6275 lpfc_set_loopback_flag(phba);
6276 if (rc == MBX_NOT_FINISHED)
6277 mempool_free(initlinkmbox, phba->mbox_mem_pool);
6278
6279 break;
6280
6281 case LPFC_DISC_AUTH:
6282
6283 lpfc_printf_vlog(vport, KERN_ERR,
6284 LOG_TRACE_EVENT,
6285 "0227 Node Authentication timeout\n");
6286 lpfc_disc_flush_list(vport);
6287
6288
6289
6290
6291
6292 if (phba->sli_rev < LPFC_SLI_REV4) {
6293 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
6294 lpfc_issue_reg_vpi(phba, vport);
6295 else {
6296 lpfc_issue_clear_la(phba, vport);
6297 vport->port_state = LPFC_VPORT_READY;
6298 }
6299 }
6300 break;
6301
6302 case LPFC_VPORT_READY:
6303 if (vport->fc_flag & FC_RSCN_MODE) {
6304 lpfc_printf_vlog(vport, KERN_ERR,
6305 LOG_TRACE_EVENT,
6306 "0231 RSCN timeout Data: x%x "
6307 "x%x\n",
6308 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
6309
6310
6311 lpfc_els_flush_cmd(vport);
6312
6313 lpfc_els_flush_rscn(vport);
6314 lpfc_disc_flush_list(vport);
6315 }
6316 break;
6317
6318 default:
6319 lpfc_printf_vlog(vport, KERN_ERR,
6320 LOG_TRACE_EVENT,
6321 "0273 Unexpected discovery timeout, "
6322 "vport State x%x\n", vport->port_state);
6323 break;
6324 }
6325
6326 switch (phba->link_state) {
6327 case LPFC_CLEAR_LA:
6328
6329 lpfc_printf_vlog(vport, KERN_ERR,
6330 LOG_TRACE_EVENT,
6331 "0228 CLEAR LA timeout\n");
6332 clrlaerr = 1;
6333 break;
6334
6335 case LPFC_LINK_UP:
6336 lpfc_issue_clear_la(phba, vport);
6337 fallthrough;
6338 case LPFC_LINK_UNKNOWN:
6339 case LPFC_WARM_START:
6340 case LPFC_INIT_START:
6341 case LPFC_INIT_MBX_CMDS:
6342 case LPFC_LINK_DOWN:
6343 case LPFC_HBA_ERROR:
6344 lpfc_printf_vlog(vport, KERN_ERR,
6345 LOG_TRACE_EVENT,
6346 "0230 Unexpected timeout, hba link "
6347 "state x%x\n", phba->link_state);
6348 clrlaerr = 1;
6349 break;
6350
6351 case LPFC_HBA_READY:
6352 break;
6353 }
6354
6355 if (clrlaerr) {
6356 lpfc_disc_flush_list(vport);
6357 if (phba->sli_rev != LPFC_SLI_REV4) {
6358 psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
6359 ~LPFC_STOP_IOCB_EVENT;
6360 psli->sli3_ring[LPFC_FCP_RING].flag &=
6361 ~LPFC_STOP_IOCB_EVENT;
6362 }
6363 vport->port_state = LPFC_VPORT_READY;
6364 }
6365 return;
6366}
6367
6368
6369
6370
6371
6372
6373
6374void
6375lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6376{
6377 MAILBOX_t *mb = &pmb->u.mb;
6378 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
6379 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
6380 struct lpfc_vport *vport = pmb->vport;
6381
6382 pmb->ctx_buf = NULL;
6383 pmb->ctx_ndlp = NULL;
6384
6385 if (phba->sli_rev < LPFC_SLI_REV4)
6386 ndlp->nlp_rpi = mb->un.varWords[0];
6387 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
6388 ndlp->nlp_type |= NLP_FABRIC;
6389 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
6390 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
6391 "0004 rpi:%x DID:%x flg:%x %d x%px\n",
6392 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
6393 kref_read(&ndlp->kref),
6394 ndlp);
6395
6396
6397
6398
6399
6400
6401 if (vport->port_type == LPFC_PHYSICAL_PORT) {
6402 phba->link_flag &= ~LS_CT_VEN_RPA;
6403 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
6404 } else {
6405 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
6406 }
6407
6408
6409
6410
6411
6412 lpfc_nlp_put(ndlp);
6413 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6414 kfree(mp);
6415 mempool_free(pmb, phba->mbox_mem_pool);
6416
6417 return;
6418}
6419
6420static int
6421lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
6422{
6423 uint16_t *rpi = param;
6424
6425 return ndlp->nlp_rpi == *rpi;
6426}
6427
6428static int
6429lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
6430{
6431 return memcmp(&ndlp->nlp_portname, param,
6432 sizeof(ndlp->nlp_portname)) == 0;
6433}
6434
6435static struct lpfc_nodelist *
6436__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
6437{
6438 struct lpfc_nodelist *ndlp;
6439
6440 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6441 if (filter(ndlp, param)) {
6442 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6443 "3185 FIND node filter %ps DID "
6444 "ndlp x%px did x%x flg x%x st x%x "
6445 "xri x%x type x%x rpi x%x\n",
6446 filter, ndlp, ndlp->nlp_DID,
6447 ndlp->nlp_flag, ndlp->nlp_state,
6448 ndlp->nlp_xri, ndlp->nlp_type,
6449 ndlp->nlp_rpi);
6450 return ndlp;
6451 }
6452 }
6453 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6454 "3186 FIND node filter %ps NOT FOUND.\n", filter);
6455 return NULL;
6456}
6457
6458
6459
6460
6461
6462struct lpfc_nodelist *
6463__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6464{
6465 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
6466}
6467
6468
6469
6470
6471
6472struct lpfc_nodelist *
6473lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
6474{
6475 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6476 struct lpfc_nodelist *ndlp;
6477
6478 spin_lock_irq(shost->host_lock);
6479 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
6480 spin_unlock_irq(shost->host_lock);
6481 return ndlp;
6482}
6483
6484
6485
6486
6487
6488
6489struct lpfc_nodelist *
6490lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6491{
6492 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6493 struct lpfc_nodelist *ndlp;
6494 unsigned long flags;
6495
6496 spin_lock_irqsave(shost->host_lock, flags);
6497 ndlp = __lpfc_findnode_rpi(vport, rpi);
6498 spin_unlock_irqrestore(shost->host_lock, flags);
6499 return ndlp;
6500}
6501
6502
6503
6504
6505
6506
6507
6508
6509
6510
6511
6512
6513
6514
6515struct lpfc_vport *
6516lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6517{
6518 struct lpfc_vport *vport;
6519 unsigned long flags;
6520 int i = 0;
6521
6522
6523 if (vpi > 0) {
6524
6525
6526
6527
6528 for (i = 0; i <= phba->max_vpi; i++) {
6529 if (vpi == phba->vpi_ids[i])
6530 break;
6531 }
6532
6533 if (i > phba->max_vpi) {
6534 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6535 "2936 Could not find Vport mapped "
6536 "to vpi %d\n", vpi);
6537 return NULL;
6538 }
6539 }
6540
6541 spin_lock_irqsave(&phba->port_list_lock, flags);
6542 list_for_each_entry(vport, &phba->port_list, listentry) {
6543 if (vport->vpi == i) {
6544 spin_unlock_irqrestore(&phba->port_list_lock, flags);
6545 return vport;
6546 }
6547 }
6548 spin_unlock_irqrestore(&phba->port_list_lock, flags);
6549 return NULL;
6550}
6551
6552struct lpfc_nodelist *
6553lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
6554{
6555 struct lpfc_nodelist *ndlp;
6556 int rpi = LPFC_RPI_ALLOC_ERROR;
6557
6558 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
6559 rpi = lpfc_sli4_alloc_rpi(vport->phba);
6560 if (rpi == LPFC_RPI_ALLOC_ERROR)
6561 return NULL;
6562 }
6563
6564 ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
6565 if (!ndlp) {
6566 if (vport->phba->sli_rev == LPFC_SLI_REV4)
6567 lpfc_sli4_free_rpi(vport->phba, rpi);
6568 return NULL;
6569 }
6570
6571 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
6572
6573 spin_lock_init(&ndlp->lock);
6574
6575 lpfc_initialize_node(vport, ndlp, did);
6576 INIT_LIST_HEAD(&ndlp->nlp_listp);
6577 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
6578 ndlp->nlp_rpi = rpi;
6579 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
6580 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
6581 "flg:x%x refcnt:%d\n",
6582 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
6583 ndlp->nlp_flag, kref_read(&ndlp->kref));
6584
6585 ndlp->active_rrqs_xri_bitmap =
6586 mempool_alloc(vport->phba->active_rrq_pool,
6587 GFP_KERNEL);
6588 if (ndlp->active_rrqs_xri_bitmap)
6589 memset(ndlp->active_rrqs_xri_bitmap, 0,
6590 ndlp->phba->cfg_rrq_xri_bitmap_sz);
6591 }
6592
6593
6594
6595 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
6596 "node init: did:x%x",
6597 ndlp->nlp_DID, 0, 0);
6598
6599 return ndlp;
6600}
6601
6602
6603
6604
6605static void
6606lpfc_nlp_release(struct kref *kref)
6607{
6608 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
6609 kref);
6610 struct lpfc_vport *vport = ndlp->vport;
6611
6612 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6613 "node release: did:x%x flg:x%x type:x%x",
6614 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6615
6616 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6617 "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
6618 __func__, ndlp, ndlp->nlp_DID,
6619 kref_read(&ndlp->kref), ndlp->nlp_rpi);
6620
6621
6622 lpfc_cancel_retry_delay_tmo(vport, ndlp);
6623 lpfc_cleanup_node(vport, ndlp);
6624
6625
6626
6627
6628
6629
6630
6631
6632 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
6633 if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR &&
6634 !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) {
6635 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
6636 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
6637 }
6638 }
6639
6640
6641
6642
6643 ndlp->vport = NULL;
6644 ndlp->nlp_state = NLP_STE_FREED_NODE;
6645 ndlp->nlp_flag = 0;
6646 ndlp->fc4_xpt_flags = 0;
6647
6648
6649 kfree(ndlp->lat_data);
6650 if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
6651 mempool_free(ndlp->active_rrqs_xri_bitmap,
6652 ndlp->phba->active_rrq_pool);
6653 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6654}
6655
6656
6657
6658
6659
6660struct lpfc_nodelist *
6661lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6662{
6663 unsigned long flags;
6664
6665 if (ndlp) {
6666 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6667 "node get: did:x%x flg:x%x refcnt:x%x",
6668 ndlp->nlp_DID, ndlp->nlp_flag,
6669 kref_read(&ndlp->kref));
6670
6671
6672
6673
6674
6675 spin_lock_irqsave(&ndlp->lock, flags);
6676 if (!kref_get_unless_zero(&ndlp->kref)) {
6677 spin_unlock_irqrestore(&ndlp->lock, flags);
6678 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6679 "0276 %s: ndlp:x%px refcnt:%d\n",
6680 __func__, (void *)ndlp, kref_read(&ndlp->kref));
6681 return NULL;
6682 }
6683 spin_unlock_irqrestore(&ndlp->lock, flags);
6684 } else {
6685 WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
6686 }
6687
6688 return ndlp;
6689}
6690
6691
6692
6693
6694int
6695lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6696{
6697 if (ndlp) {
6698 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6699 "node put: did:x%x flg:x%x refcnt:x%x",
6700 ndlp->nlp_DID, ndlp->nlp_flag,
6701 kref_read(&ndlp->kref));
6702 } else {
6703 WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
6704 }
6705
6706 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
6707}
6708
6709
6710
6711
6712
6713
6714int
6715lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
6716{
6717 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6718 "node not used: did:x%x flg:x%x refcnt:x%x",
6719 ndlp->nlp_DID, ndlp->nlp_flag,
6720 kref_read(&ndlp->kref));
6721
6722 if (kref_read(&ndlp->kref) == 1)
6723 if (lpfc_nlp_put(ndlp))
6724 return 1;
6725 return 0;
6726}
6727
6728
6729
6730
6731
6732
6733
6734
6735
6736
6737
6738static int
6739lpfc_fcf_inuse(struct lpfc_hba *phba)
6740{
6741 struct lpfc_vport **vports;
6742 int i, ret = 0;
6743 struct lpfc_nodelist *ndlp;
6744 struct Scsi_Host *shost;
6745
6746 vports = lpfc_create_vport_work_array(phba);
6747
6748
6749 if (!vports)
6750 return 1;
6751
6752 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6753 shost = lpfc_shost_from_vport(vports[i]);
6754 spin_lock_irq(shost->host_lock);
6755
6756
6757
6758
6759
6760
6761 if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
6762 spin_unlock_irq(shost->host_lock);
6763 ret = 1;
6764 goto out;
6765 }
6766 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
6767 if (ndlp->rport &&
6768 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
6769 ret = 1;
6770 spin_unlock_irq(shost->host_lock);
6771 goto out;
6772 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
6773 ret = 1;
6774 lpfc_printf_log(phba, KERN_INFO,
6775 LOG_NODE | LOG_DISCOVERY,
6776 "2624 RPI %x DID %x flag %x "
6777 "still logged in\n",
6778 ndlp->nlp_rpi, ndlp->nlp_DID,
6779 ndlp->nlp_flag);
6780 }
6781 }
6782 spin_unlock_irq(shost->host_lock);
6783 }
6784out:
6785 lpfc_destroy_vport_work_array(phba, vports);
6786 return ret;
6787}
6788
6789
6790
6791
6792
6793
6794
6795
6796void
6797lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6798{
6799 struct lpfc_vport *vport = mboxq->vport;
6800 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6801
6802 if (mboxq->u.mb.mbxStatus) {
6803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6804 "2555 UNREG_VFI mbxStatus error x%x "
6805 "HBA state x%x\n",
6806 mboxq->u.mb.mbxStatus, vport->port_state);
6807 }
6808 spin_lock_irq(shost->host_lock);
6809 phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
6810 spin_unlock_irq(shost->host_lock);
6811 mempool_free(mboxq, phba->mbox_mem_pool);
6812 return;
6813}
6814
6815
6816
6817
6818
6819
6820
6821
6822static void
6823lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6824{
6825 struct lpfc_vport *vport = mboxq->vport;
6826
6827 if (mboxq->u.mb.mbxStatus) {
6828 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6829 "2550 UNREG_FCFI mbxStatus error x%x "
6830 "HBA state x%x\n",
6831 mboxq->u.mb.mbxStatus, vport->port_state);
6832 }
6833 mempool_free(mboxq, phba->mbox_mem_pool);
6834 return;
6835}
6836
6837
6838
6839
6840
6841
6842
6843
6844
6845int
6846lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
6847{
6848 struct lpfc_vport **vports;
6849 struct lpfc_nodelist *ndlp;
6850 struct Scsi_Host *shost;
6851 int i = 0, rc;
6852
6853
6854 if (lpfc_fcf_inuse(phba))
6855 lpfc_unreg_hba_rpis(phba);
6856
6857
6858 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
6859
6860
6861 vports = lpfc_create_vport_work_array(phba);
6862 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
6863 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6864
6865 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6866 if (ndlp)
6867 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6868 lpfc_cleanup_pending_mbox(vports[i]);
6869 if (phba->sli_rev == LPFC_SLI_REV4)
6870 lpfc_sli4_unreg_all_rpis(vports[i]);
6871 lpfc_mbx_unreg_vpi(vports[i]);
6872 shost = lpfc_shost_from_vport(vports[i]);
6873 spin_lock_irq(shost->host_lock);
6874 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6875 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
6876 spin_unlock_irq(shost->host_lock);
6877 }
6878 lpfc_destroy_vport_work_array(phba, vports);
6879 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
6880 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6881 if (ndlp)
6882 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
6883 lpfc_cleanup_pending_mbox(phba->pport);
6884 if (phba->sli_rev == LPFC_SLI_REV4)
6885 lpfc_sli4_unreg_all_rpis(phba->pport);
6886 lpfc_mbx_unreg_vpi(phba->pport);
6887 shost = lpfc_shost_from_vport(phba->pport);
6888 spin_lock_irq(shost->host_lock);
6889 phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6890 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
6891 spin_unlock_irq(shost->host_lock);
6892 }
6893
6894
6895 lpfc_els_flush_all_cmd(phba);
6896
6897
6898 rc = lpfc_issue_unreg_vfi(phba->pport);
6899 return rc;
6900}
6901
6902
6903
6904
6905
6906
6907
6908
6909
6910
6911
6912int
6913lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
6914{
6915 LPFC_MBOXQ_t *mbox;
6916 int rc;
6917
6918 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6919 if (!mbox) {
6920 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6921 "2551 UNREG_FCFI mbox allocation failed"
6922 "HBA state x%x\n", phba->pport->port_state);
6923 return -ENOMEM;
6924 }
6925 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
6926 mbox->vport = phba->pport;
6927 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
6928 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6929
6930 if (rc == MBX_NOT_FINISHED) {
6931 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6932 "2552 Unregister FCFI command failed rc x%x "
6933 "HBA state x%x\n",
6934 rc, phba->pport->port_state);
6935 return -EINVAL;
6936 }
6937 return 0;
6938}
6939
6940
6941
6942
6943
6944
6945
6946
6947void
6948lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
6949{
6950 int rc;
6951
6952
6953 rc = lpfc_unregister_fcf_prep(phba);
6954 if (rc) {
6955 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6956 "2748 Failed to prepare for unregistering "
6957 "HBA's FCF record: rc=%d\n", rc);
6958 return;
6959 }
6960
6961
6962 rc = lpfc_sli4_unregister_fcf(phba);
6963 if (rc)
6964 return;
6965
6966 phba->fcf.fcf_flag = 0;
6967 phba->fcf.current_rec.flag = 0;
6968
6969
6970
6971
6972
6973 if ((phba->pport->load_flag & FC_UNLOADING) ||
6974 (phba->link_state < LPFC_LINK_UP))
6975 return;
6976
6977
6978 spin_lock_irq(&phba->hbalock);
6979 phba->fcf.fcf_flag |= FCF_INIT_DISC;
6980 spin_unlock_irq(&phba->hbalock);
6981
6982
6983 lpfc_sli4_clear_fcf_rr_bmask(phba);
6984
6985 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
6986
6987 if (rc) {
6988 spin_lock_irq(&phba->hbalock);
6989 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
6990 spin_unlock_irq(&phba->hbalock);
6991 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6992 "2553 lpfc_unregister_unused_fcf failed "
6993 "to read FCF record HBA state x%x\n",
6994 phba->pport->port_state);
6995 }
6996}
6997
6998
6999
7000
7001
7002
7003
7004
7005void
7006lpfc_unregister_fcf(struct lpfc_hba *phba)
7007{
7008 int rc;
7009
7010
7011 rc = lpfc_unregister_fcf_prep(phba);
7012 if (rc) {
7013 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7014 "2749 Failed to prepare for unregistering "
7015 "HBA's FCF record: rc=%d\n", rc);
7016 return;
7017 }
7018
7019
7020 rc = lpfc_sli4_unregister_fcf(phba);
7021 if (rc)
7022 return;
7023
7024 spin_lock_irq(&phba->hbalock);
7025 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
7026 spin_unlock_irq(&phba->hbalock);
7027}
7028
7029
7030
7031
7032
7033
7034
7035
7036
7037void
7038lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
7039{
7040
7041
7042
7043
7044
7045 spin_lock_irq(&phba->hbalock);
7046 if (!(phba->hba_flag & HBA_FCOE_MODE) ||
7047 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
7048 !(phba->hba_flag & HBA_FIP_SUPPORT) ||
7049 (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
7050 (phba->pport->port_state == LPFC_FLOGI)) {
7051 spin_unlock_irq(&phba->hbalock);
7052 return;
7053 }
7054 spin_unlock_irq(&phba->hbalock);
7055
7056 if (lpfc_fcf_inuse(phba))
7057 return;
7058
7059 lpfc_unregister_fcf_rescan(phba);
7060}
7061
7062
7063
7064
7065
7066
7067
7068
7069
7070static void
7071lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
7072 uint8_t *buff)
7073{
7074 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
7075 struct lpfc_fcf_conn_hdr *conn_hdr;
7076 struct lpfc_fcf_conn_rec *conn_rec;
7077 uint32_t record_count;
7078 int i;
7079
7080
7081 list_for_each_entry_safe(conn_entry, next_conn_entry,
7082 &phba->fcf_conn_rec_list, list) {
7083 list_del_init(&conn_entry->list);
7084 kfree(conn_entry);
7085 }
7086
7087 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
7088 record_count = conn_hdr->length * sizeof(uint32_t)/
7089 sizeof(struct lpfc_fcf_conn_rec);
7090
7091 conn_rec = (struct lpfc_fcf_conn_rec *)
7092 (buff + sizeof(struct lpfc_fcf_conn_hdr));
7093
7094 for (i = 0; i < record_count; i++) {
7095 if (!(conn_rec[i].flags & FCFCNCT_VALID))
7096 continue;
7097 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
7098 GFP_KERNEL);
7099 if (!conn_entry) {
7100 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7101 "2566 Failed to allocate connection"
7102 " table entry\n");
7103 return;
7104 }
7105
7106 memcpy(&conn_entry->conn_rec, &conn_rec[i],
7107 sizeof(struct lpfc_fcf_conn_rec));
7108 list_add_tail(&conn_entry->list,
7109 &phba->fcf_conn_rec_list);
7110 }
7111
7112 if (!list_empty(&phba->fcf_conn_rec_list)) {
7113 i = 0;
7114 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
7115 list) {
7116 conn_rec = &conn_entry->conn_rec;
7117 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7118 "3345 FCF connection list rec[%02d]: "
7119 "flags:x%04x, vtag:x%04x, "
7120 "fabric_name:x%02x:%02x:%02x:%02x:"
7121 "%02x:%02x:%02x:%02x, "
7122 "switch_name:x%02x:%02x:%02x:%02x:"
7123 "%02x:%02x:%02x:%02x\n", i++,
7124 conn_rec->flags, conn_rec->vlan_tag,
7125 conn_rec->fabric_name[0],
7126 conn_rec->fabric_name[1],
7127 conn_rec->fabric_name[2],
7128 conn_rec->fabric_name[3],
7129 conn_rec->fabric_name[4],
7130 conn_rec->fabric_name[5],
7131 conn_rec->fabric_name[6],
7132 conn_rec->fabric_name[7],
7133 conn_rec->switch_name[0],
7134 conn_rec->switch_name[1],
7135 conn_rec->switch_name[2],
7136 conn_rec->switch_name[3],
7137 conn_rec->switch_name[4],
7138 conn_rec->switch_name[5],
7139 conn_rec->switch_name[6],
7140 conn_rec->switch_name[7]);
7141 }
7142 }
7143}
7144
7145
7146
7147
7148
7149
7150
7151
7152
7153static void
7154lpfc_read_fcoe_param(struct lpfc_hba *phba,
7155 uint8_t *buff)
7156{
7157 struct lpfc_fip_param_hdr *fcoe_param_hdr;
7158 struct lpfc_fcoe_params *fcoe_param;
7159
7160 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
7161 buff;
7162 fcoe_param = (struct lpfc_fcoe_params *)
7163 (buff + sizeof(struct lpfc_fip_param_hdr));
7164
7165 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
7166 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
7167 return;
7168
7169 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
7170 phba->valid_vlan = 1;
7171 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
7172 0xFFF;
7173 }
7174
7175 phba->fc_map[0] = fcoe_param->fc_map[0];
7176 phba->fc_map[1] = fcoe_param->fc_map[1];
7177 phba->fc_map[2] = fcoe_param->fc_map[2];
7178 return;
7179}
7180
7181
7182
7183
7184
7185
7186
7187
7188
7189
7190
7191static uint8_t *
7192lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
7193{
7194 uint32_t offset = 0, rec_length;
7195
7196 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
7197 (size < sizeof(uint32_t)))
7198 return NULL;
7199
7200 rec_length = buff[offset + 1];
7201
7202
7203
7204
7205
7206 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
7207 <= size) {
7208 if (buff[offset] == rec_type)
7209 return &buff[offset];
7210
7211 if (buff[offset] == LPFC_REGION23_LAST_REC)
7212 return NULL;
7213
7214 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
7215 rec_length = buff[offset + 1];
7216 }
7217 return NULL;
7218}
7219
7220
7221
7222
7223
7224
7225
7226
7227
7228
7229void
7230lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
7231 uint8_t *buff,
7232 uint32_t size)
7233{
7234 uint32_t offset = 0;
7235 uint8_t *rec_ptr;
7236
7237
7238
7239
7240
7241 if (size < 2*sizeof(uint32_t))
7242 return;
7243
7244
7245 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
7246 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7247 "2567 Config region 23 has bad signature\n");
7248 return;
7249 }
7250
7251 offset += 4;
7252
7253
7254 if (buff[offset] != LPFC_REGION23_VERSION) {
7255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7256 "2568 Config region 23 has bad version\n");
7257 return;
7258 }
7259 offset += 4;
7260
7261
7262 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
7263 size - offset, FCOE_PARAM_TYPE);
7264 if (rec_ptr)
7265 lpfc_read_fcoe_param(phba, rec_ptr);
7266
7267
7268 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
7269 size - offset, FCOE_CONN_TBL_TYPE);
7270 if (rec_ptr)
7271 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
7272
7273}
7274