/*
 * lpfc_hbadisc.c - Emulex LPFC Fibre Channel driver: HBA and remote
 * port discovery handling (link events, node state management, and
 * SLI-4 FCF table discovery).
 */
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);

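/* Return 1 if the node is eligible for transport registration: it has a
 * known FC4 type or is one of the well-known fabric DIDs (fabric
 * controller, name server, FDMI).
 */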
static int
lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
{
	if (ndlp->nlp_fc4_type ||
	    ndlp->nlp_DID == Fabric_DID ||
	    ndlp->nlp_DID == NameServer_DID ||
	    ndlp->nlp_DID == FDMI_DID)
		return 1;
	return 0;
}

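/* The source of a terminate rport I/O is either a dev_loss_tmo event or a
 * call to fc_remove_host.  While the rport should be valid during these
 * downcalls, the transport can call twice in a single event, so validate
 * the rport, its dd_data, and the node before dereferencing them.
 */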
static int
lpfc_rport_invalid(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;

	if (!rport) {
		pr_err("**** %s: NULL rport, exit.\n", __func__);
		return -EINVAL;
	}

	rdata = rport->dd_data;
	if (!rdata) {
		pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
		       __func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	ndlp = rdata->pnode;
	if (!rdata->pnode) {
		pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
		       __func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	if (!ndlp->vport) {
		pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
		       "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
		       rport->scsi_target_id);
		return -EINVAL;
	}
	return 0;
}

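/* Abort all outstanding I/O to the target when the transport terminates
 * rport I/O, provided the node still maps to a valid SCSI target id.
 */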
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;

	if (lpfc_rport_invalid(rport))
		return;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	vport = ndlp->vport;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport terminate: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID)
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}

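/**
 * lpfc_dev_loss_tmo_callbk - Delete rport callback
 * @rport: Pointer to fc_rport structure.
 *
 * Called by the FC transport when an rport is deleted.  Detaches the node
 * from the rport and queues a dev-loss event to the worker thread, unless
 * the driver is unloading or the node is still mapped.
 */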
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	unsigned long iflags;

	ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
	if (!ndlp)
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosscb: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
			 "load_flag x%x refcnt %d\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
			 vport->load_flag, kref_read(&ndlp->kref));

	/* Don't schedule a worker thread event if the vport is going down.
	 * The teardown process cleans up the node via lpfc_drop_node.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
		ndlp->rport = NULL;

		ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;

		/* Remove the node reference taken when the rport was
		 * registered with the transport.
		 */
		lpfc_nlp_put(ndlp);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6790 rport name %llx dev_loss_evt pending\n",
				 rport->port_name);
		return;
	}

	spin_lock_irqsave(&ndlp->lock, iflags);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;

	/*
	 * The backend does not expect any more calls associated with this
	 * rport. Remove the association between rport and ndlp.
	 */
	ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
	((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
	ndlp->rport = NULL;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done.
	 */
	evtp->evt_arg1 = lpfc_nlp_get(ndlp);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return;
}

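/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to lpfc_nodelist structure.
 *
 * Called from the worker thread when the devloss timeout timer expires.
 * For an SLI-4 host, returns 1 when at least one remote node (including
 * this @ndlp) is still using the FCF; otherwise returns 0.
 */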
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int warn_on = 0;
	int fcf_inuse = 0;
	unsigned long iflags;

	vport = ndlp->vport;
	name = (uint8_t *)&ndlp->nlp_portname;
	phba = vport->phba;

	spin_lock_irqsave(&ndlp->lock, iflags);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosstmo:did:x%x type:x%x id:x%x",
			      ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
			 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));

	/* If the driver is recovering the rport, ignore devloss. */
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	/* Fabric nodes are done; release the node reference now. */
	if (ndlp->nlp_type & NLP_FABRIC) {
		lpfc_nlp_put(ndlp);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

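/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * Called from the worker thread after the devloss timeout handler has run
 * and the ndlp reference has been released. When the last remote node
 * using the FCF times out, the in-use FCF is unregistered if the FIP
 * engine is idle; if an FCF table scan or roundrobin failover is in
 * progress, the devloss timeout state is recorded so that process can
 * unregister the in-use FCF instead.
 */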
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened while the FCF was no longer
	 * in use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

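/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post events
 * from interrupt context. It allocates the data structure required for
 * posting an event and tracks the number of events pending to prevent
 * event storms.
 */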
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba)
{
	struct lpfc_fast_path_event *ret;

	/* If there are lots of fast path events, limit the number queued */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
		      GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

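/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure required for posting events.
 */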
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		   struct lpfc_fast_path_event *evt)
{
	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

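/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. It posts the event to the fc transport
 * netlink interface.
 */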
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		       struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
				     work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
		fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
					       read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			   (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
					       check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
					       queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
					  fc_get_event_number(),
					  evt_data_size,
					  evt_data,
					  LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}

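/* Drain the HBA work list and dispatch each queued event to its handler.
 * The hbalock is dropped around each handler invocation.
 */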
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_RECOVER_PORT:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);
}

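/* Process all pending host attention and slow-path events for the HBA,
 * then run per-vport timer work and finally drain the work list.
 */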
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT)
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if (pring && (status & HA_RXMASK ||
		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
		      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
				set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			/* Driver could have abort request completed in queue
			 * when link goes down
			 */
			if (phba->link_state >= LPFC_LINK_DOWN ||
			    phba->link_flag & LS_MDS_LOOPBACK) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

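/* Worker thread entry point: sleep until LPFC_DATA_READY is set (or the
 * thread is asked to stop), then process pending work via lpfc_work_done().
 */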
int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					      (test_and_clear_bit(LPFC_DATA_READY,
								  &phba->data_flags)
					       || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}

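/*
 * This is only called to handle FC worker events. Since this a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */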
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events
	 * will be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}

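/* Walk the vport node list and either remove each node or put it into
 * recovery, unregistering RPIs as required for the SLI revision.
 */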
void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;

		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     ((ndlp->nlp_DID == NameServer_DID) ||
		      (ndlp->nlp_DID == FDMI_DID))))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;

		/* Notify transport of connectivity loss to trigger cleanup. */
		if (phba->nvmet_support &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_nvmet_invalidate_host(phba, ndlp);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}
}

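/* Tear down per-vport activity on link failure: flush receive buffers,
 * RSCN state, and outstanding ELS commands, then recover the node list.
 */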
void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if its running */
	lpfc_can_disctmo(vport);
}

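/* Per-vport link-down handling: post the transport event, run the link
 * failure cleanup, and cancel any delayed NPort discovery.
 */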
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Down: state:x%x rtry:x%x flg:x%x",
			      vport->port_state, vport->fc_ns_retry,
			      vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}

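/* HBA-wide link-down handling: block SCSI I/O, reset FCF state, run the
 * per-vport link-down path, and clean up default RPIs and pt2pt state.
 */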
int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	phba->defer_flogi_acc_flag = false;

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		if (phba->sli4_hba.conf_trunk) {
			phba->trunk_link.link0.state = 0;
			phba->trunk_link.link1.state = 0;
			phba->trunk_link.link2.state = 0;
			phba->trunk_link.link3.state = 0;
			phba->sli4_hba.link_state.logical_speed =
						LPFC_LINK_SPEED_UNKNOWN;
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);

			vports[i]->fc_myDID = 0;

			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				if (phba->nvmet_support)
					lpfc_nvmet_update_targetport(phba);
				else
					lpfc_nvme_update_localport(vports[i]);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clean up any SLI3 firmware default rpi's */
	if (phba->sli_rev > LPFC_SLI_REV3)
		goto skip_unreg_did;

	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

 skip_unreg_did:
	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		phba->pport->rcv_flogi_cnt = 0;
		spin_unlock_irq(shost->host_lock);
	}
	return 0;
}

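/* On link up, reset per-node FC4 types and unregister RPIs for nodes
 * that must re-authenticate (fabric nodes and nodes not doing ADISC).
 */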
static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);

		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup its safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

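/* Per-vport link-up handling: post the transport event, reset discovery
 * flags, and clean up the node list for rediscovery.
 */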
static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Up: top:x%x speed:x%x flg:x%x",
			      phba->fc_topology, phba->fc_linkspeed,
			      phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	lpfc_linkup_cleanup_nodes(vport);
}

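/* HBA-wide link-up handling: unblock fabric IOCBs, bring each vport up,
 * and reset the FLOGI bookkeeping carried over from the last link down.
 */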
static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clear the pport flogi counter in case the link down was
	 * absorbed without an ACQE. No lock here - in worker thread
	 * and discovery is synchronized.
	 */
	spin_lock_irq(shost->host_lock);
	phba->pport->rcv_flogi_cnt = 0;
	spin_unlock_irq(shost->host_lock);

	/* reinitialize initial HBA flag */
	phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
	phba->defer_flogi_acc_flag = false;

	return 0;
}

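/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */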
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn off Ring0 events */
	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}

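/* Completion handler for the CONFIG_LINK mailbox command: decide whether
 * to wait for FAN, defer FLOGI behind a READ_SPARAM for BB-credit
 * recovery, or start discovery immediately.
 */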
void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	LPFC_MBOXQ_t *sparam_mb;
	struct lpfc_dmabuf *sparam_mp;
	u16 status = pmb->u.mb.mbxStatus;
	int rc;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (status)
		goto out;

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl.
	 */
	if (vport->port_state != LPFC_FLOGI) {
		/* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
		 * bb-credit recovery is in place.
		 */
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!sparam_mb)
				goto sparam_out;

			rc = lpfc_read_sparam(phba, sparam_mb, 0);
			if (rc) {
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}
			sparam_mb->vport = vport;
			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				sparam_mp = (struct lpfc_dmabuf *)
						sparam_mb->ctx_buf;
				lpfc_mbuf_free(phba, sparam_mp->virt,
					       sparam_mp->phys);
				kfree(sparam_mp);
				sparam_mb->ctx_buf = NULL;
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}

			phba->hba_flag |= HBA_DEFER_FLOGI;
		} else {
			lpfc_initial_flogi(vport);
		}
	} else {
		if (vport->fc_flag & FC_PT2PT)
			lpfc_disc_start(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
			 status, vport->port_state);

sparam_out:
	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

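/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 *
 * This function resets the round robin bit mask and clears the fcf
 * priority list. The list deletions are done while holding the hbalock.
 * The ON_LIST and FLOGI_FAILED flags are cleared from each lpfc_fcf_pri
 * record.
 */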
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;

	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
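
/* Completion handler for the REG_FCFI mailbox command: record the FCFI,
 * mark the FCF registered, and either restart the FCF table scan on a
 * pending FCoE event or proceed with discovery via INIT_VFI.
 */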
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
				 vport->port_state);
		goto fail_out;
	}

	/* Start FCoE Discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl.
	 */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

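/**
 * lpfc_fab_name_match - Check if the fcf fabric name match
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the names are identical this function returns 1,
 * else it returns 0.
 */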
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

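/**
 * lpfc_sw_name_match - Check if the fcf switch name match
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the names are identical this function returns 1,
 * else it returns 0.
 */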
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

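/**
 * lpfc_mac_addr_match - Check if the fcf mac address match
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the provided
 * mac address. If the addresses are identical this function returns 1,
 * else it returns 0.
 */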
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

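/* Return true if the current and new VLAN ids match exactly. */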
static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

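/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA
 * FCF record. The hbalock is held by the caller on this code path.
 */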
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
}

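/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF record to the
 * driver's lpfc_hba data structure.
 */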
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

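/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the hbalock held.
 */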
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	lockdep_assert_held(&phba->hbalock);

	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
				     bf_get(lpfc_fcf_record_fcf_index,
					    new_fcf_record),
				     new_fcf_record);
}

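/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 */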
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI &&
		    phba->pport->fc_flag & FC_FABRIC) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}

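/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from
 * the config region to decide if this FCF can be used for SAN discovery.
 * It returns 1 if this record can be used for SAN discovery, else 0. On a
 * match, @boot_flag indicates whether this FCF is used by the boot bios
 * and @addr_mode indicates the addressing mode to use. If the FCF record
 * must be used with a particular vlan id, @vlan_id is set accordingly;
 * otherwise it is set to LPFC_FCOE_NULL_VID.
 */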
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}

	/* FCF not valid/available or solicitation in progress */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, the driver
	 * connects to all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);

		/*
		 * When there are no FCF connect entries, use FPMA
		 * addressing if the FCF supports it.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If FCF record reports a vlan id, use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If the FCF record does not support any addressing
		 * mode, skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
		      & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not
			 * support it, continue.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not
			 * support it, continue.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches the filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If the user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by the user is not
		 * supported by the FCF, allow the fabric to decide.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);
		/*
		 * If the user specified a required address mode, assign
		 * that address mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				     LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use it
		 * only if the FCF supports that mode.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
			*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
			*addr_mode = LPFC_FCF_FPMA;

		/* If matching connect list has a vlan id, use it */
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
			*vlan_id = conn_entry->conn_rec.vlan_tag;
		/*
		 * If no vlan id is specified in the connect list, use the
		 * vlan id in the FCF record.
		 */
		else if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;

		return 1;
	}

	return 0;
}

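/**
 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
 * @phba: pointer to lpfc hba data structure.
 * @unreg_fcf: Unregister FCF if FCF table needs to be re-scanned.
 *
 * This function checks if there is any fcoe event pending while the
 * driver scans FCF entries. If there is a pending event, it restarts
 * the FCF scan and returns 1; otherwise it returns 0.
 */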
int
lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
{
	/*
	 * If the link is up and no FCoE events occurred during FCF
	 * discovery, there is no need to restart FCF discovery.
	 */
	if ((phba->link_state >= LPFC_LINK_UP) &&
	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
		return 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2768 Pending link or FCF event during current "
			"handling of the previous event: link_state:x%x, "
			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
			phba->fcoe_eventtag);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
	spin_unlock_irq(&phba->hbalock);

	if (phba->link_state >= LPFC_LINK_UP) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2780 Restart FCF table scan due to "
				"pending FCF event:evt_tag_at_scan:x%x, "
				"evt_tag_current:x%x\n",
				phba->fcoe_eventtag_at_fcf_scan,
				phba->fcoe_eventtag);
		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	} else {
		/*
		 * Do not continue FCF discovery and clear FCF_TS_INPROG
		 * flag
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2833 Stop FCF discovery process due to link "
				"state change (x%x)\n", phba->link_state);
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Unregister the currently registered FCF if required */
	if (unreg_fcf) {
		spin_lock_irq(&phba->hbalock);
		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
		spin_unlock_irq(&phba->hbalock);
		lpfc_sli4_unregister_fcf(phba);
	}
	return 1;
}

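/**
 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_cnt: number of eligible fcf records seen so far.
 *
 * This function makes a running random selection decision on an FCF record
 * across a sequence of @fcf_cnt eligible FCF records with equal probability.
 * The lower 16 bits of the 32-bit value returned from prandom_u32() are
 * taken as the random number.
 *
 * Returns true when the newly read FCF record should be chosen; otherwise
 * returns false to keep the previously chosen FCF record.
 */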
static bool
lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
{
	uint32_t rand_num;

	/* Get 16-bit uniform random number */
	rand_num = 0xFFFF & prandom_u32();

	/* Decision with probability 1/fcf_cnt */
	if ((fcf_cnt * rand_num) < 0xFFFF)
		return true;
	else
		return false;
}

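/**
 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to mailbox object.
 * @next_fcf_index: pointer to holder of next fcf index.
 *
 * This routine parses the non-embedded fcf mailbox command by performing
 * the necessary error checking, SGE parsing of the non-embedded read FCF
 * record mailbox command, and endianness swapping.
 *
 * Returns the pointer to the new FCF record in the non-embedded mailbox
 * command DMA memory if successful, otherwise NULL.
 */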
static struct fcf_record *
lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
			     uint16_t *next_fcf_index)
{
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;
	uint32_t shdr_status, shdr_add_status, if_type;
	union lpfc_sli4_cfg_shdr *shdr;
	struct fcf_record *new_fcf_record;

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	if (unlikely(!mboxq->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2524 Failed to get the non-embedded SGE "
				"virtual address\n");
		return NULL;
	}
	virt_addr = mboxq->sge_array->addr[0];

	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
	lpfc_sli_pcimem_bcopy(shdr, shdr,
			      sizeof(union lpfc_sli4_cfg_shdr));
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status) {
		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
		    if_type == LPFC_SLI_INTF_IF_TYPE_2)
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"2726 READ_FCF_RECORD Indicates empty "
					"FCF table.\n");
		else
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2521 READ_FCF_RECORD mailbox failed "
					"with status x%x add_status x%x, "
					"mbx\n", shdr_status, shdr_add_status);
		return NULL;
	}

	/* Interpreting the returned information of the FCF record */
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
			      sizeof(struct lpfc_mbx_read_fcf_tbl));
	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
	new_fcf_record = (struct fcf_record *)(virt_addr +
			  sizeof(struct lpfc_mbx_read_fcf_tbl));
	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
			      offsetof(struct fcf_record, vlan_bitmap));
	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);

	return new_fcf_record;
}

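/**
 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record.
 * @vlan_id: the lowest vlan identifier associated to this fcf record.
 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
 *
 * This routine logs the detailed FCF record if LOG_FIP logging is enabled.
 */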
static void
lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
			      struct fcf_record *fcf_record,
			      uint16_t vlan_id,
			      uint16_t next_fcf_index)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2764 READ_FCF_RECORD:\n"
			"\tFCF_Index     : x%x\n"
			"\tFCF_Avail     : x%x\n"
			"\tFCF_Valid     : x%x\n"
			"\tFCF_SOL       : x%x\n"
			"\tFIP_Priority  : x%x\n"
			"\tMAC_Provider  : x%x\n"
			"\tLowest VLANID : x%x\n"
			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
			"\tNext_FCF_Index: x%x\n",
			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
			fcf_record->fip_priority,
			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
			vlan_id,
			bf_get(lpfc_fcf_record_mac_0, fcf_record),
			bf_get(lpfc_fcf_record_mac_1, fcf_record),
			bf_get(lpfc_fcf_record_mac_2, fcf_record),
			bf_get(lpfc_fcf_record_mac_3, fcf_record),
			bf_get(lpfc_fcf_record_mac_4, fcf_record),
			bf_get(lpfc_fcf_record_mac_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
			next_fcf_index);
}

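/**
 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to an existing FCF record.
 * @new_fcf_record: pointer to a new FCF record.
 * @new_vlan_id: vlan id from the new FCF record.
 *
 * This function performs a matching test of a new FCF record against an
 * existing FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID,
 * the vlan id will not be used as part of the FCF record matching criteria.
 *
 * Returns true if all the fields match, otherwise returns false.
 */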
static bool
lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
			   struct lpfc_fcf_rec *fcf_rec,
			   struct fcf_record *new_fcf_record,
			   uint16_t new_vlan_id)
{
	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
			return false;
	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
		return false;
	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
		return false;
	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
		return false;
	if (fcf_rec->priority != new_fcf_record->fip_priority)
		return false;
	return true;
}

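/**
 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
 * @vport: Pointer to vport object.
 * @fcf_index: index to next fcf.
 *
 * This function processes the roundrobin fcf failover to the next fcf
 * index. When this function is invoked, there will be a current fcf
 * registered for flogi.
 * Return: 0 to continue retrying flogi on the currently registered fcf;
 *         1 to stop flogi on the currently registered fcf.
 */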
int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
{
	struct lpfc_hba *phba = vport->phba;
	int rc;

	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2872 Devloss tmo with no eligible "
					"FCF, unregister in-use FCF (x%x) "
					"and rescan FCF table\n",
					phba->fcf.current_rec.fcf_indx);
			lpfc_unregister_fcf_rescan(phba);
			goto stop_flogi_current_fcf;
		}
		/* Mark the end to FLOGI roundrobin failover */
		phba->hba_flag &= ~FCF_RR_INPROG;
		/* Allow action to new fcf asynchronous event */
		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2865 No FCF available, stop roundrobin FCF "
				"failover and change port state:x%x/x%x\n",
				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;

		if (!phba->fcf.fcf_redisc_attempted) {
			lpfc_unregister_fcf(phba);

			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (!rc) {
				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
						"3195 Rediscover FCF table\n");
				phba->fcf.fcf_redisc_attempted = 1;
				lpfc_sli4_clear_fcf_rr_bmask(phba);
			} else {
				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
						"3196 Rediscover FCF table "
						"failed. Status:x%x\n", rc);
			}
		} else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
					"3197 Already rediscover FCF table "
					"attempted. No more retry\n");
		}
		goto stop_flogi_current_fcf;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
				"2794 Try FLOGI roundrobin FCF failover to "
				"(x%x)\n", fcf_index);
		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
		if (rc)
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2761 FLOGI roundrobin FCF failover "
					"failed (rc:x%x) to read FCF (x%x)\n",
					rc, phba->fcf.current_rec.fcf_indx);
		else
			goto stop_flogi_current_fcf;
	}
	return 0;

stop_flogi_current_fcf:
	lpfc_can_disctmo(vport);
	return 1;
}

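/**
 * lpfc_sli4_fcf_pri_list_del
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to delete
 *
 * This routine checks the on-list flag of the fcf_index to be deleted.
 * If it is on the list, it is removed from the list and the flag is
 * cleared. This routine grabs the hbalock before removing the fcf
 * record from the list.
 */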
static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
				       uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3058 deleting idx x%x pri x%x flg x%x\n",
			fcf_index, new_fcf_pri->fcf_rec.priority,
			new_fcf_pri->fcf_rec.flag);
	spin_lock_irq(&phba->hbalock);
	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
		if (phba->fcf.current_rec.priority ==
		    new_fcf_pri->fcf_rec.priority)
			phba->fcf.eligible_fcf_cnt--;
		list_del_init(&new_fcf_pri->list);
		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
	}
	spin_unlock_irq(&phba->hbalock);
}

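/**
 * lpfc_sli4_set_fcf_flogi_fail
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to update
 *
 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
 * flag so the round robin selection for the particular priority level
 * will try a different fcf record that does not have this bit set.
 * If the fcf record is re-read for any reason this flag is cleared before
 * adding it to the priority list.
 */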
void
lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *new_fcf_pri;

	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	spin_lock_irq(&phba->hbalock);
	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
	spin_unlock_irq(&phba->hbalock);
}

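/**
 * lpfc_sli4_fcf_pri_list_add
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to add
 * @new_fcf_record: pointer to a new FCF record.
 *
 * This routine checks the priority of the fcf_index to be added.
 * If it is a lower priority than the current head of the fcf_pri list
 * then it is added to the list in the right order.
 * If it is the same priority as the current head of the list then it
 * is added to the head of the list and its bit in the rr_bmask is set.
 * If the fcf_index to be added is of a higher priority than the current
 * head of the list then the rr_bmask is cleared, its bit is set in the
 * rr_bmask and it is added to the head of the list.
 * returns:
 * 0=success 1=failure
 */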
2172static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
2173 uint16_t fcf_index,
2174 struct fcf_record *new_fcf_record)
2175{
2176 uint16_t current_fcf_pri;
2177 uint16_t last_index;
2178 struct lpfc_fcf_pri *fcf_pri;
2179 struct lpfc_fcf_pri *next_fcf_pri;
2180 struct lpfc_fcf_pri *new_fcf_pri;
2181 int ret;
2182
2183 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2184 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2185 "3059 adding idx x%x pri x%x flg x%x\n",
2186 fcf_index, new_fcf_record->fip_priority,
2187 new_fcf_pri->fcf_rec.flag);
2188 spin_lock_irq(&phba->hbalock);
2189 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2190 list_del_init(&new_fcf_pri->list);
2191 new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2192 new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2193 if (list_empty(&phba->fcf.fcf_pri_list)) {
2194 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2195 ret = lpfc_sli4_fcf_rr_index_set(phba,
2196 new_fcf_pri->fcf_rec.fcf_index);
2197 goto out;
2198 }
2199
2200 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2201 LPFC_SLI4_FCF_TBL_INDX_MAX);
2202 if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2203 ret = 0;
2204 goto out;
2205 }
2206 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2207 if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
2208 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2209 if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
2210 memset(phba->fcf.fcf_rr_bmask, 0,
2211 sizeof(*phba->fcf.fcf_rr_bmask));
2212
2213 phba->fcf.eligible_fcf_cnt = 1;
2214 } else
2215
2216 phba->fcf.eligible_fcf_cnt++;
2217 ret = lpfc_sli4_fcf_rr_index_set(phba,
2218 new_fcf_pri->fcf_rec.fcf_index);
2219 goto out;
2220 }
2221
2222 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2223 &phba->fcf.fcf_pri_list, list) {
2224 if (new_fcf_pri->fcf_rec.priority <=
2225 fcf_pri->fcf_rec.priority) {
2226 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2227 list_add(&new_fcf_pri->list,
2228 &phba->fcf.fcf_pri_list);
2229 else
2230 list_add(&new_fcf_pri->list,
2231 &((struct lpfc_fcf_pri *)
2232 fcf_pri->list.prev)->list);
2233 ret = 0;
2234 goto out;
2235 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2236 || new_fcf_pri->fcf_rec.priority <
2237 next_fcf_pri->fcf_rec.priority) {
2238 list_add(&new_fcf_pri->list, &fcf_pri->list);
2239 ret = 0;
2240 goto out;
2241 }
2242 if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2243 continue;
2244
2245 }
2246 ret = 1;
2247out:
2248
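	/* '=' (not '|=') is intentional: it also clears LPFC_FCF_FLOGI_FAILED */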
2249 new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2250 spin_unlock_irq(&phba->hbalock);
2251 return ret;
2252}
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
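/**
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - FCF table scan mailbox completion
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the READ_FCF_RECORD mailbox object.
 *
 * Completion handler for a READ_FCF_RECORD issued during an FCF table scan.
 * The returned record is checked against the connection list; a matching
 * record either confirms the in-use FCF, updates the current or failover
 * candidate record, or triggers fast FCF failover when the in-use record is
 * no longer valid. The scan continues until the port reports no further
 * records, at which point the best candidate is registered.
 **/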
2269void
2270lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2271{
2272 struct fcf_record *new_fcf_record;
2273 uint32_t boot_flag, addr_mode;
2274 uint16_t fcf_index, next_fcf_index;
2275 struct lpfc_fcf_rec *fcf_rec = NULL;
2276 uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2277 bool select_new_fcf;
2278 int rc;
2279
2280
2281 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2282 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2283 return;
2284 }
2285
2286
2287 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2288 &next_fcf_index);
2289 if (!new_fcf_record) {
2290 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2291 "2765 Mailbox command READ_FCF_RECORD "
				"failed to retrieve an FCF record.\n");
2293
2294 spin_lock_irq(&phba->hbalock);
2295 phba->hba_flag &= ~FCF_TS_INPROG;
2296 spin_unlock_irq(&phba->hbalock);
2297 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2298 return;
2299 }
2300
2301
2302 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2303 &addr_mode, &vlan_id);
2304
2305
2306 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2307 next_fcf_index);
2308
2309
2310
2311
2312
2313
2314 if (!rc) {
2315 lpfc_sli4_fcf_pri_list_del(phba,
2316 bf_get(lpfc_fcf_record_fcf_index,
2317 new_fcf_record));
2318 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2319 "2781 FCF (x%x) failed connection "
2320 "list check: (x%x/x%x/%x)\n",
2321 bf_get(lpfc_fcf_record_fcf_index,
2322 new_fcf_record),
2323 bf_get(lpfc_fcf_record_fcf_avail,
2324 new_fcf_record),
2325 bf_get(lpfc_fcf_record_fcf_valid,
2326 new_fcf_record),
2327 bf_get(lpfc_fcf_record_fcf_sol,
2328 new_fcf_record));
2329 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2330 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2331 new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2332 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2333 phba->fcf.current_rec.fcf_indx) {
2334 lpfc_printf_log(phba, KERN_ERR,
2335 LOG_TRACE_EVENT,
2336 "2862 FCF (x%x) matches property "
2337 "of in-use FCF (x%x)\n",
2338 bf_get(lpfc_fcf_record_fcf_index,
2339 new_fcf_record),
2340 phba->fcf.current_rec.fcf_indx);
2341 goto read_next_fcf;
2342 }
2343
2344
2345
2346
2347
2348
2349 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2350 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2351 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2352 "2835 Invalid in-use FCF "
2353 "(x%x), enter FCF failover "
2354 "table scan.\n",
2355 phba->fcf.current_rec.fcf_indx);
2356 spin_lock_irq(&phba->hbalock);
2357 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2358 spin_unlock_irq(&phba->hbalock);
2359 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2360 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2361 LPFC_FCOE_FCF_GET_FIRST);
2362 return;
2363 }
2364 }
2365 goto read_next_fcf;
2366 } else {
2367 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2368 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2369 new_fcf_record);
2370 if (rc)
2371 goto read_next_fcf;
2372 }
2373
2374
2375
2376
2377
2378
2379
2380 spin_lock_irq(&phba->hbalock);
2381 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2382 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2383 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2384 new_fcf_record, vlan_id)) {
2385 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2386 phba->fcf.current_rec.fcf_indx) {
2387 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2388 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2389
2390 __lpfc_sli4_stop_fcf_redisc_wait_timer(
2391 phba);
2392 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2393
2394 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2395 spin_unlock_irq(&phba->hbalock);
2396 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2397 "2836 New FCF matches in-use "
2398 "FCF (x%x), port_state:x%x, "
2399 "fc_flag:x%x\n",
2400 phba->fcf.current_rec.fcf_indx,
2401 phba->pport->port_state,
2402 phba->pport->fc_flag);
2403 goto out;
2404 } else
2405 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2406 "2863 New FCF (x%x) matches "
2407 "property of in-use FCF (x%x)\n",
2408 bf_get(lpfc_fcf_record_fcf_index,
2409 new_fcf_record),
2410 phba->fcf.current_rec.fcf_indx);
2411 }
2412
2413
2414
2415
2416
2417
2418
2419 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2420 spin_unlock_irq(&phba->hbalock);
2421 goto read_next_fcf;
2422 }
2423 }
2424
2425
2426
2427
2428 if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2429 fcf_rec = &phba->fcf.failover_rec;
2430 else
2431 fcf_rec = &phba->fcf.current_rec;
2432
2433 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
2434
2435
2436
2437
2438
2439 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2440
2441 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2442 "2837 Update current FCF record "
2443 "(x%x) with new FCF record (x%x)\n",
2444 fcf_rec->fcf_indx,
2445 bf_get(lpfc_fcf_record_fcf_index,
2446 new_fcf_record));
2447 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2448 addr_mode, vlan_id, BOOT_ENABLE);
2449 spin_unlock_irq(&phba->hbalock);
2450 goto read_next_fcf;
2451 }
2452
2453
2454
2455
2456
2457 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2458 spin_unlock_irq(&phba->hbalock);
2459 goto read_next_fcf;
2460 }
2461
2462
2463
2464
2465 if (new_fcf_record->fip_priority < fcf_rec->priority) {
2466
2467 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2468 "2838 Update current FCF record "
2469 "(x%x) with new FCF record (x%x)\n",
2470 fcf_rec->fcf_indx,
2471 bf_get(lpfc_fcf_record_fcf_index,
2472 new_fcf_record));
2473 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2474 addr_mode, vlan_id, 0);
2475
2476 phba->fcf.eligible_fcf_cnt = 1;
2477 } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2478
2479 phba->fcf.eligible_fcf_cnt++;
2480 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2481 phba->fcf.eligible_fcf_cnt);
2482 if (select_new_fcf) {
2483 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2484 "2839 Update current FCF record "
2485 "(x%x) with new FCF record (x%x)\n",
2486 fcf_rec->fcf_indx,
2487 bf_get(lpfc_fcf_record_fcf_index,
2488 new_fcf_record));
2489
2490 __lpfc_update_fcf_record(phba, fcf_rec,
2491 new_fcf_record,
2492 addr_mode, vlan_id, 0);
2493 }
2494 }
2495 spin_unlock_irq(&phba->hbalock);
2496 goto read_next_fcf;
2497 }
2498
2499
2500
2501
2502 if (fcf_rec) {
2503 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2504 "2840 Update initial FCF candidate "
2505 "with FCF (x%x)\n",
2506 bf_get(lpfc_fcf_record_fcf_index,
2507 new_fcf_record));
2508 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2509 addr_mode, vlan_id, (boot_flag ?
2510 BOOT_ENABLE : 0));
2511 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2512
2513 phba->fcf.eligible_fcf_cnt = 1;
2514 }
2515 spin_unlock_irq(&phba->hbalock);
2516 goto read_next_fcf;
2517
2518read_next_fcf:
2519 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2520 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2521 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2522
2523
2524
2525
2526
2527
2528
2529
2530 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2531 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2532 "2782 No suitable FCF found: "
2533 "(x%x/x%x)\n",
2534 phba->fcoe_eventtag_at_fcf_scan,
2535 bf_get(lpfc_fcf_record_fcf_index,
2536 new_fcf_record));
2537 spin_lock_irq(&phba->hbalock);
2538 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2539 phba->hba_flag &= ~FCF_TS_INPROG;
2540 spin_unlock_irq(&phba->hbalock);
2541
2542 lpfc_printf_log(phba, KERN_INFO,
2543 LOG_FIP,
2544 "2864 On devloss tmo "
2545 "unreg in-use FCF and "
2546 "rescan FCF table\n");
2547 lpfc_unregister_fcf_rescan(phba);
2548 return;
2549 }
2550
2551
2552
2553 phba->hba_flag &= ~FCF_TS_INPROG;
2554 spin_unlock_irq(&phba->hbalock);
2555 return;
2556 }
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567 lpfc_unregister_fcf(phba);
2568
2569
2570 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2571 "2842 Replace in-use FCF (x%x) "
2572 "with failover FCF (x%x)\n",
2573 phba->fcf.current_rec.fcf_indx,
2574 phba->fcf.failover_rec.fcf_indx);
2575 memcpy(&phba->fcf.current_rec,
2576 &phba->fcf.failover_rec,
2577 sizeof(struct lpfc_fcf_rec));
2578
2579
2580
2581
2582
2583 spin_lock_irq(&phba->hbalock);
2584 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2585 spin_unlock_irq(&phba->hbalock);
2586
2587 lpfc_register_fcf(phba);
2588 } else {
2589
2590
2591
2592
2593 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2594 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2595 return;
2596
2597 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2598 phba->fcf.fcf_flag & FCF_IN_USE) {
2599
2600
2601
2602
2603
2604
2605 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2606 "2841 In-use FCF record (x%x) "
2607 "not reported, entering fast "
2608 "FCF failover mode scanning.\n",
2609 phba->fcf.current_rec.fcf_indx);
2610 spin_lock_irq(&phba->hbalock);
2611 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2612 spin_unlock_irq(&phba->hbalock);
2613 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2614 LPFC_FCOE_FCF_GET_FIRST);
2615 return;
2616 }
2617
2618 lpfc_register_fcf(phba);
2619 }
2620 } else
2621 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2622 return;
2623
2624out:
2625 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2626 lpfc_register_fcf(phba);
2627
2628 return;
2629}
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
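/**
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - FCF roundrobin read mailbox completion
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the READ_FCF_RECORD mailbox object.
 *
 * Completion handler for a READ_FCF_RECORD issued during FLOGI roundrobin
 * FCF failover. An ineligible record is removed from the roundrobin bitmask
 * and the next candidate is processed; an eligible record replaces the
 * in-use FCF, which is unregistered before the new record is registered.
 **/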
2646void
2647lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2648{
2649 struct fcf_record *new_fcf_record;
2650 uint32_t boot_flag, addr_mode;
2651 uint16_t next_fcf_index, fcf_index;
2652 uint16_t current_fcf_index;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2654 int rc;
2655
2656
2657 if (phba->link_state < LPFC_LINK_UP) {
2658 spin_lock_irq(&phba->hbalock);
2659 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2660 phba->hba_flag &= ~FCF_RR_INPROG;
2661 spin_unlock_irq(&phba->hbalock);
2662 goto out;
2663 }
2664
2665
2666 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2667 &next_fcf_index);
2668 if (!new_fcf_record) {
2669 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2670 "2766 Mailbox command READ_FCF_RECORD "
				"failed to retrieve an FCF record. "
2672 "hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
2673 phba->fcf.fcf_flag);
2674 lpfc_unregister_fcf_rescan(phba);
2675 goto out;
2676 }
2677
2678
2679 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2680 &addr_mode, &vlan_id);
2681
2682
2683 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2684 next_fcf_index);
2685
2686 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2687 if (!rc) {
2688 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2848 Remove ineligible FCF (x%x) from "
				"roundrobin bmask\n", fcf_index);
2691
2692 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2693
2694 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2695 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2696 if (rc)
2697 goto out;
2698 goto error_out;
2699 }
2700
2701 if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2702 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2703 "2760 Perform FLOGI roundrobin FCF failover: "
2704 "FCF (x%x) back to FCF (x%x)\n",
2705 phba->fcf.current_rec.fcf_indx, fcf_index);
2706
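		/* Wait 500 ms before retrying FLOGI to the current FCF */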
2707 msleep(500);
2708 lpfc_issue_init_vfi(phba->pport);
2709 goto out;
2710 }
2711
2712
2713 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2714 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2715 phba->fcf.failover_rec.fcf_indx, fcf_index);
2716 spin_lock_irq(&phba->hbalock);
2717 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2718 new_fcf_record, addr_mode, vlan_id,
2719 (boot_flag ? BOOT_ENABLE : 0));
2720 spin_unlock_irq(&phba->hbalock);
2721
2722 current_fcf_index = phba->fcf.current_rec.fcf_indx;
2723
2724
2725 lpfc_unregister_fcf(phba);
2726
2727
2728 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2729 sizeof(struct lpfc_fcf_rec));
2730
2731 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2732 "2783 Perform FLOGI roundrobin FCF failover: FCF "
2733 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2734
2735error_out:
2736 lpfc_register_fcf(phba);
2737out:
2738 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2739}
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
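/**
 * lpfc_mbx_cmpl_read_fcf_rec - FCF record read mailbox completion
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the READ_FCF_RECORD mailbox object.
 *
 * Completion handler for a READ_FCF_RECORD issued while FCF discovery is
 * in progress. An eligible record is simply added to the roundrobin
 * priority list.
 **/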
2752void
2753lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2754{
2755 struct fcf_record *new_fcf_record;
2756 uint32_t boot_flag, addr_mode;
2757 uint16_t fcf_index, next_fcf_index;
	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
2759 int rc;
2760
2761
2762 if (phba->link_state < LPFC_LINK_UP)
2763 goto out;
2764
2765
2766 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2767 goto out;
2768
2769
2770 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2771 &next_fcf_index);
2772 if (!new_fcf_record) {
2773 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2774 "2767 Mailbox command READ_FCF_RECORD "
				"failed to retrieve an FCF record.\n");
2776 goto out;
2777 }
2778
2779
2780 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2781 &addr_mode, &vlan_id);
2782
2783
2784 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2785 next_fcf_index);
2786
2787 if (!rc)
2788 goto out;
2789
2790
2791 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2792
2793 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2794
2795out:
2796 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2797}
2798
2799
2800
2801
2802
2803
2804
2805
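/**
 * lpfc_init_vfi_cmpl - Completion handler for the INIT_VFI mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the INIT_VFI mailbox object.
 *
 * On success, issue the initial FLOGI for the vport; on failure, mark the
 * vport FC_VPORT_FAILED.
 **/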
2806static void
2807lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2808{
2809 struct lpfc_vport *vport = mboxq->vport;
2810
2811
2812
2813
2814
2815 if (mboxq->u.mb.mbxStatus &&
2816 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2817 LPFC_SLI_INTF_IF_TYPE_0) &&
2818 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2819 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2820 "2891 Init VFI mailbox failed 0x%x\n",
2821 mboxq->u.mb.mbxStatus);
2822 mempool_free(mboxq, phba->mbox_mem_pool);
2823 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2824 return;
2825 }
2826
2827 lpfc_initial_flogi(vport);
2828 mempool_free(mboxq, phba->mbox_mem_pool);
2829 return;
2830}
2831
2832
2833
2834
2835
2836
2837
2838
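/**
 * lpfc_issue_init_vfi - Issue an INIT_VFI mailbox command for a vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Allocate and issue an INIT_VFI mailbox command with
 * lpfc_init_vfi_cmpl() as its completion handler.
 **/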
2839void
2840lpfc_issue_init_vfi(struct lpfc_vport *vport)
2841{
2842 LPFC_MBOXQ_t *mboxq;
2843 int rc;
2844 struct lpfc_hba *phba = vport->phba;
2845
2846 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2847 if (!mboxq) {
2848 lpfc_printf_vlog(vport, KERN_ERR,
2849 LOG_TRACE_EVENT, "2892 Failed to allocate "
2850 "init_vfi mailbox\n");
2851 return;
2852 }
2853 lpfc_init_vfi(mboxq, vport);
2854 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2855 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2856 if (rc == MBX_NOT_FINISHED) {
2857 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2858 "2893 Failed to issue init_vfi mailbox\n");
2859 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2860 }
2861}
2862
2863
2864
2865
2866
2867
2868
2869
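/**
 * lpfc_init_vpi_cmpl - Completion handler for the INIT_VPI mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the INIT_VPI mailbox object.
 *
 * On success, clear FC_VPORT_NEEDS_INIT_VPI and either register the new
 * vport (for the physical port or a vport already in FDISC state) or issue
 * the initial FDISC when the fabric supports NPIV.
 **/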
2870void
2871lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2872{
2873 struct lpfc_vport *vport = mboxq->vport;
2874 struct lpfc_nodelist *ndlp;
2875 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2876
2877 if (mboxq->u.mb.mbxStatus) {
2878 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2879 "2609 Init VPI mailbox failed 0x%x\n",
2880 mboxq->u.mb.mbxStatus);
2881 mempool_free(mboxq, phba->mbox_mem_pool);
2882 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2883 return;
2884 }
2885 spin_lock_irq(shost->host_lock);
2886 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2887 spin_unlock_irq(shost->host_lock);
2888
2889
2890 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2891 ndlp = lpfc_findnode_did(vport, Fabric_DID);
2892 if (!ndlp)
2893 lpfc_printf_vlog(vport, KERN_ERR,
2894 LOG_TRACE_EVENT,
2895 "2731 Cannot find fabric "
2896 "controller node\n");
2897 else
2898 lpfc_register_new_vport(phba, vport, ndlp);
2899 mempool_free(mboxq, phba->mbox_mem_pool);
2900 return;
2901 }
2902
2903 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2904 lpfc_initial_fdisc(vport);
2905 else {
2906 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
2907 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2908 "2606 No NPIV Fabric support\n");
2909 }
2910 mempool_free(mboxq, phba->mbox_mem_pool);
2911 return;
2912}
2913
2914
2915
2916
2917
2918
2919
2920
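/**
 * lpfc_issue_init_vpi - Issue an INIT_VPI mailbox command for a vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Allocate a vpi for the vport if it does not already have one, then
 * allocate and issue an INIT_VPI mailbox command with lpfc_init_vpi_cmpl()
 * as its completion handler.
 **/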
2921void
2922lpfc_issue_init_vpi(struct lpfc_vport *vport)
2923{
2924 LPFC_MBOXQ_t *mboxq;
2925 int rc, vpi;
2926
2927 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
2928 vpi = lpfc_alloc_vpi(vport->phba);
2929 if (!vpi) {
2930 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2931 "3303 Failed to obtain vport vpi\n");
2932 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2933 return;
2934 }
2935 vport->vpi = vpi;
2936 }
2937
2938 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2939 if (!mboxq) {
2940 lpfc_printf_vlog(vport, KERN_ERR,
2941 LOG_TRACE_EVENT, "2607 Failed to allocate "
2942 "init_vpi mailbox\n");
2943 return;
2944 }
2945 lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2946 mboxq->vport = vport;
2947 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2948 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2949 if (rc == MBX_NOT_FINISHED) {
2950 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2951 "2608 Failed to issue init_vpi mailbox\n");
2952 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2953 }
2954}
2955
2956
2957
2958
2959
2960
2961
2962
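/**
 * lpfc_start_fdiscs - Issue FDISCs for all the NPIV vports on an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * Walk the vport work array and, for every NPIV vport, either issue an
 * INIT_VPI, issue the initial FDISC, or mark the vport failed when NPIV
 * is not supported by the fabric or the topology.
 **/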
2963void
2964lpfc_start_fdiscs(struct lpfc_hba *phba)
2965{
2966 struct lpfc_vport **vports;
2967 int i;
2968
2969 vports = lpfc_create_vport_work_array(phba);
2970 if (vports != NULL) {
2971 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2972 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
2973 continue;
2974
2975 if (vports[i]->vpi > phba->max_vpi) {
2976 lpfc_vport_set_state(vports[i],
2977 FC_VPORT_FAILED);
2978 continue;
2979 }
2980 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2981 lpfc_vport_set_state(vports[i],
2982 FC_VPORT_LINKDOWN);
2983 continue;
2984 }
2985 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
2986 lpfc_issue_init_vpi(vports[i]);
2987 continue;
2988 }
2989 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2990 lpfc_initial_fdisc(vports[i]);
2991 else {
2992 lpfc_vport_set_state(vports[i],
2993 FC_VPORT_NO_FABRIC_SUPP);
2994 lpfc_printf_vlog(vports[i], KERN_ERR,
2995 LOG_TRACE_EVENT,
2996 "0259 No NPIV "
2997 "Fabric support\n");
2998 }
2999 }
3000 }
3001 lpfc_destroy_vport_work_array(phba, vports);
3002}
3003
3004void
3005lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
3006{
3007 struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
3008 struct lpfc_vport *vport = mboxq->vport;
3009 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3010
3011
3012
3013
3014
3015 if (mboxq->u.mb.mbxStatus &&
3016 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3017 LPFC_SLI_INTF_IF_TYPE_0) &&
3018 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
3019 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3020 "2018 REG_VFI mbxStatus error x%x "
3021 "HBA state x%x\n",
3022 mboxq->u.mb.mbxStatus, vport->port_state);
3023 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3024
3025 lpfc_disc_list_loopmap(vport);
3026
3027 lpfc_disc_start(vport);
3028 goto out_free_mem;
3029 }
3030 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3031 goto out_free_mem;
3032 }
3033
3034
3035
3036
3037
3038 if (vport->fc_flag & FC_VFI_REGISTERED)
3039 if (!(phba->sli_rev == LPFC_SLI_REV4 &&
3040 vport->fc_flag & FC_PT2PT))
3041 goto out_free_mem;
3042
3043
3044 spin_lock_irq(shost->host_lock);
3045 vport->vpi_state |= LPFC_VPI_REGISTERED;
3046 vport->fc_flag |= FC_VFI_REGISTERED;
3047 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3048 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
3049 spin_unlock_irq(shost->host_lock);
3050
3051
3052 if ((phba->sli_rev == LPFC_SLI_REV4) &&
3053 (phba->link_flag & LS_LOOPBACK_MODE)) {
3054 phba->link_state = LPFC_HBA_READY;
3055 goto out_free_mem;
3056 }
3057
3058 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3059 "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x "
3060 "alpacnt:%d LinkState:%x topology:%x\n",
3061 vport->port_state, vport->fc_flag, vport->fc_myDID,
3062 vport->phba->alpa_map[0],
3063 phba->link_state, phba->fc_topology);
3064
3065 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3066
3067
3068
3069
3070 if ((vport->fc_flag & FC_PT2PT) ||
3071 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
3072 !(vport->fc_flag & FC_PUBLIC_LOOP))) {
3073
3074
3075 lpfc_disc_list_loopmap(vport);
3076
3077 if (vport->fc_flag & FC_PT2PT)
3078 vport->port_state = LPFC_VPORT_READY;
3079 else
3080 lpfc_disc_start(vport);
3081 } else {
3082 lpfc_start_fdiscs(phba);
3083 lpfc_do_scr_ns_plogi(phba, vport);
3084 }
3085 }
3086
3087out_free_mem:
3088 mempool_free(mboxq, phba->mbox_mem_pool);
3089 if (dmabuf) {
3090 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
3091 kfree(dmabuf);
3092 }
3093 return;
3094}
3095
3096static void
3097lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3098{
3099 MAILBOX_t *mb = &pmb->u.mb;
3100 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3101 struct lpfc_vport *vport = pmb->vport;
3102 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3103 struct serv_parm *sp = &vport->fc_sparam;
3104 uint32_t ed_tov;
3105
3106
3107 if (mb->mbxStatus) {
3108
3109 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3110 "0319 READ_SPARAM mbxStatus error x%x "
				 "hba state x%x\n",
3112 mb->mbxStatus, vport->port_state);
3113 lpfc_linkdown(phba);
3114 goto out;
3115 }
3116
3117 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
3118 sizeof (struct serv_parm));
3119
3120 ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
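	/* edtovResolution set means E_D_TOV is in nanoseconds; convert to ms */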
3121 if (sp->cmn.edtovResolution)
3122 ed_tov = (ed_tov + 999999) / 1000000;
3123
3124 phba->fc_edtov = ed_tov;
3125 phba->fc_ratov = (2 * ed_tov) / 1000;
3126 if (phba->fc_ratov < FF_DEF_RATOV) {
3127
3128 phba->fc_ratov = FF_DEF_RATOV;
3129 }
3130
3131 lpfc_update_vport_wwn(vport);
3132 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
3133 if (vport->port_type == LPFC_PHYSICAL_PORT) {
3134 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
3136 }
3137
3138 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3139 kfree(mp);
3140 mempool_free(pmb, phba->mbox_mem_pool);
3141
3142
3143
3144
3145 if (phba->hba_flag & HBA_DEFER_FLOGI) {
3146 lpfc_initial_flogi(vport);
3147 phba->hba_flag &= ~HBA_DEFER_FLOGI;
3148 }
3149 return;
3150
3151out:
3152 pmb->ctx_buf = NULL;
3153 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3154 kfree(mp);
3155 lpfc_issue_clear_la(phba, vport);
3156 mempool_free(pmb, phba->mbox_mem_pool);
3157 return;
3158}
3159
3160static void
3161lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3162{
3163 struct lpfc_vport *vport = phba->pport;
3164 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
3165 struct Scsi_Host *shost;
3166 int i;
3167 struct lpfc_dmabuf *mp;
3168 int rc;
3169 struct fcf_record *fcf_record;
3170 uint32_t fc_flags = 0;
3171 unsigned long iflags;
3172
3173 spin_lock_irqsave(&phba->hbalock, iflags);
3174 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
3175
3176 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3177 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
3178 case LPFC_LINK_SPEED_1GHZ:
3179 case LPFC_LINK_SPEED_2GHZ:
3180 case LPFC_LINK_SPEED_4GHZ:
3181 case LPFC_LINK_SPEED_8GHZ:
3182 case LPFC_LINK_SPEED_10GHZ:
3183 case LPFC_LINK_SPEED_16GHZ:
3184 case LPFC_LINK_SPEED_32GHZ:
3185 case LPFC_LINK_SPEED_64GHZ:
3186 case LPFC_LINK_SPEED_128GHZ:
3187 break;
3188 default:
3189 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
3190 break;
3191 }
3192 }
3193
3194 if (phba->fc_topology &&
3195 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
3196 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3314 Topology changed: was 0x%x now 0x%x\n",
3198 phba->fc_topology,
3199 bf_get(lpfc_mbx_read_top_topology, la));
3200 phba->fc_topology_changed = 1;
3201 }
3202
3203 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
3204 phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA);
3205
3206 shost = lpfc_shost_from_vport(vport);
3207 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3208 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
3209
3210
3211
3212
3213 if (phba->cfg_enable_npiv && phba->max_vpi)
3214 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3215 "1309 Link Up Event npiv not supported in loop "
3216 "topology\n");
3217
3218 if (bf_get(lpfc_mbx_read_top_il, la))
3219 fc_flags |= FC_LBIT;
3220
3221 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3222 i = la->lilpBde64.tus.f.bdeSize;
3223
3224 if (i == 0) {
3225 phba->alpa_map[0] = 0;
3226 } else {
3227 if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3228 int numalpa, j, k;
3229 union {
3230 uint8_t pamap[16];
3231 struct {
3232 uint32_t wd1;
3233 uint32_t wd2;
3234 uint32_t wd3;
3235 uint32_t wd4;
3236 } pa;
3237 } un;
3238 numalpa = phba->alpa_map[0];
3239 j = 0;
3240 while (j < numalpa) {
3241 memset(un.pamap, 0, 16);
3242 for (k = 1; j < numalpa; k++) {
3243 un.pamap[k - 1] =
3244 phba->alpa_map[j + 1];
3245 j++;
3246 if (k == 16)
3247 break;
3248 }
3249
3250 lpfc_printf_log(phba,
3251 KERN_WARNING,
3252 LOG_LINK_EVENT,
3253 "1304 Link Up Event "
3254 "ALPA map Data: x%x "
3255 "x%x x%x x%x\n",
3256 un.pa.wd1, un.pa.wd2,
3257 un.pa.wd3, un.pa.wd4);
3258 }
3259 }
3260 }
3261 } else {
3262 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3263 if (phba->max_vpi && phba->cfg_enable_npiv &&
3264 (phba->sli_rev >= LPFC_SLI_REV3))
3265 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3266 }
3267 vport->fc_myDID = phba->fc_pref_DID;
3268 fc_flags |= FC_LBIT;
3269 }
3270 spin_unlock_irqrestore(&phba->hbalock, iflags);
3271
3272 if (fc_flags) {
3273 spin_lock_irqsave(shost->host_lock, iflags);
3274 vport->fc_flag |= fc_flags;
3275 spin_unlock_irqrestore(shost->host_lock, iflags);
3276 }
3277
3278 lpfc_linkup(phba);
	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3282 if (!sparam_mbox)
3283 goto out;
3284
3285 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
3286 if (rc) {
3287 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3288 goto out;
3289 }
3290 sparam_mbox->vport = vport;
3291 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
3292 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
3293 if (rc == MBX_NOT_FINISHED) {
3294 mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
3295 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3296 kfree(mp);
3297 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3298 goto out;
3299 }
3300
3301 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3302 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3303 if (!cfglink_mbox)
3304 goto out;
3305 vport->port_state = LPFC_LOCAL_CFG_LINK;
3306 lpfc_config_link(phba, cfglink_mbox);
3307 cfglink_mbox->vport = vport;
3308 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
3309 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
3310 if (rc == MBX_NOT_FINISHED) {
3311 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
3312 goto out;
3313 }
3314 } else {
3315 vport->port_state = LPFC_VPORT_UNKNOWN;
3316
3317
3318
3319
3320
3321 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
3322 fcf_record = kzalloc(sizeof(struct fcf_record),
3323 GFP_KERNEL);
3324 if (unlikely(!fcf_record)) {
3325 lpfc_printf_log(phba, KERN_ERR,
3326 LOG_TRACE_EVENT,
3327 "2554 Could not allocate memory for "
3328 "fcf record\n");
3329 rc = -ENODEV;
3330 goto out;
3331 }
3332
3333 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
3334 LPFC_FCOE_FCF_DEF_INDEX);
3335 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
3336 if (unlikely(rc)) {
3337 lpfc_printf_log(phba, KERN_ERR,
3338 LOG_TRACE_EVENT,
3339 "2013 Could not manually add FCF "
3340 "record 0, status %d\n", rc);
3341 rc = -ENODEV;
3342 kfree(fcf_record);
3343 goto out;
3344 }
3345 kfree(fcf_record);
3346 }
3347
3348
3349
3350
3351 spin_lock_irqsave(&phba->hbalock, iflags);
3352 if (phba->hba_flag & FCF_TS_INPROG) {
3353 spin_unlock_irqrestore(&phba->hbalock, iflags);
3354 return;
3355 }
3356
3357 phba->fcf.fcf_flag |= FCF_INIT_DISC;
3358 spin_unlock_irqrestore(&phba->hbalock, iflags);
3359 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3360 "2778 Start FCF table scan at linkup\n");
3361 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3362 LPFC_FCOE_FCF_GET_FIRST);
3363 if (rc) {
3364 spin_lock_irqsave(&phba->hbalock, iflags);
3365 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3366 spin_unlock_irqrestore(&phba->hbalock, iflags);
3367 goto out;
3368 }
3369
3370 lpfc_sli4_clear_fcf_rr_bmask(phba);
3371 }
3372
3373
3374 memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
3375 scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
3376 init_utsname()->nodename);
3377 return;
3378out:
3379 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3380 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3381 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
3382 vport->port_state, sparam_mbox, cfglink_mbox);
3383 lpfc_issue_clear_la(phba, vport);
3384 return;
3385}
3386
3387static void
3388lpfc_enable_la(struct lpfc_hba *phba)
3389{
3390 uint32_t control;
3391 struct lpfc_sli *psli = &phba->sli;
3392 spin_lock_irq(&phba->hbalock);
3393 psli->sli_flag |= LPFC_PROCESS_LA;
3394 if (phba->sli_rev <= LPFC_SLI_REV3) {
3395 control = readl(phba->HCregaddr);
3396 control |= HC_LAINT_ENA;
3397 writel(control, phba->HCregaddr);
3398 readl(phba->HCregaddr);
3399 }
3400 spin_unlock_irq(&phba->hbalock);
3401}
3402
3403static void
3404lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
3405{
3406 lpfc_linkdown(phba);
3407 lpfc_enable_la(phba);
3408 lpfc_unregister_unused_fcf(phba);
3409
3410}
3411
3412
3413
3414
3415
3416
3417
3418
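/**
 * lpfc_mbx_cmpl_read_topology - Completion handler for READ_TOPOLOGY mailbox
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the READ_TOPOLOGY mailbox object.
 *
 * Decode the link attention type from the mailbox data and drive the
 * corresponding link-up or link-down handling, including the Menlo
 * maintenance-mode special cases on pre-SLI4 hardware.
 **/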
3419void
3420lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3421{
3422 struct lpfc_vport *vport = pmb->vport;
3423 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3424 struct lpfc_mbx_read_top *la;
3425 struct lpfc_sli_ring *pring;
3426 MAILBOX_t *mb = &pmb->u.mb;
3427 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3428 uint8_t attn_type;
3429 unsigned long iflags;
3430
3431
3432 pring = lpfc_phba_elsring(phba);
3433 if (pring)
3434 pring->flag &= ~LPFC_STOP_IOCB_EVENT;
3435
3436
3437 if (mb->mbxStatus) {
3438 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3439 "1307 READ_LA mbox error x%x state x%x\n",
3440 mb->mbxStatus, vport->port_state);
3441 lpfc_mbx_issue_link_down(phba);
3442 phba->link_state = LPFC_HBA_ERROR;
3443 goto lpfc_mbx_cmpl_read_topology_free_mbuf;
3444 }
3445
3446 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3447 attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
3448
3449 memcpy(&phba->alpa_map[0], mp->virt, 128);
3450
3451 spin_lock_irqsave(shost->host_lock, iflags);
3452 if (bf_get(lpfc_mbx_read_top_pb, la))
3453 vport->fc_flag |= FC_BYPASSED_MODE;
3454 else
3455 vport->fc_flag &= ~FC_BYPASSED_MODE;
3456 spin_unlock_irqrestore(shost->host_lock, iflags);
3457
3458 if (phba->fc_eventTag <= la->eventTag) {
3459 phba->fc_stat.LinkMultiEvent++;
3460 if (attn_type == LPFC_ATT_LINK_UP)
3461 if (phba->fc_eventTag != 0)
3462 lpfc_linkdown(phba);
3463 }
3464
3465 phba->fc_eventTag = la->eventTag;
3466 if (phba->sli_rev < LPFC_SLI_REV4) {
3467 spin_lock_irqsave(&phba->hbalock, iflags);
3468 if (bf_get(lpfc_mbx_read_top_mm, la))
3469 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3470 else
3471 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3472 spin_unlock_irqrestore(&phba->hbalock, iflags);
3473 }
3474
3475 phba->link_events++;
3476 if ((attn_type == LPFC_ATT_LINK_UP) &&
3477 !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
3478 phba->fc_stat.LinkUp++;
3479 if (phba->link_flag & LS_LOOPBACK_MODE) {
3480 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3481 "1306 Link Up Event in loop back mode "
3482 "x%x received Data: x%x x%x x%x x%x\n",
3483 la->eventTag, phba->fc_eventTag,
3484 bf_get(lpfc_mbx_read_top_alpa_granted,
3485 la),
3486 bf_get(lpfc_mbx_read_top_link_spd, la),
3487 phba->alpa_map[0]);
3488 } else {
3489 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3490 "1303 Link Up Event x%x received "
3491 "Data: x%x x%x x%x x%x x%x x%x %d\n",
3492 la->eventTag, phba->fc_eventTag,
3493 bf_get(lpfc_mbx_read_top_alpa_granted,
3494 la),
3495 bf_get(lpfc_mbx_read_top_link_spd, la),
3496 phba->alpa_map[0],
3497 bf_get(lpfc_mbx_read_top_mm, la),
3498 bf_get(lpfc_mbx_read_top_fa, la),
3499 phba->wait_4_mlo_maint_flg);
3500 }
3501 lpfc_mbx_process_link_up(phba, la);
3502 } else if (attn_type == LPFC_ATT_LINK_DOWN ||
3503 attn_type == LPFC_ATT_UNEXP_WWPN) {
3504 phba->fc_stat.LinkDown++;
3505 if (phba->link_flag & LS_LOOPBACK_MODE)
3506 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3507 "1308 Link Down Event in loop back mode "
3508 "x%x received "
3509 "Data: x%x x%x x%x\n",
3510 la->eventTag, phba->fc_eventTag,
3511 phba->pport->port_state, vport->fc_flag);
3512 else if (attn_type == LPFC_ATT_UNEXP_WWPN)
3513 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3514 "1313 Link Down Unexpected FA WWPN Event x%x "
3515 "received Data: x%x x%x x%x x%x x%x\n",
3516 la->eventTag, phba->fc_eventTag,
3517 phba->pport->port_state, vport->fc_flag,
3518 bf_get(lpfc_mbx_read_top_mm, la),
3519 bf_get(lpfc_mbx_read_top_fa, la));
3520 else
3521 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3522 "1305 Link Down Event x%x received "
3523 "Data: x%x x%x x%x x%x x%x\n",
3524 la->eventTag, phba->fc_eventTag,
3525 phba->pport->port_state, vport->fc_flag,
3526 bf_get(lpfc_mbx_read_top_mm, la),
3527 bf_get(lpfc_mbx_read_top_fa, la));
3528 lpfc_mbx_issue_link_down(phba);
3529 }
3530 if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
3531 attn_type == LPFC_ATT_LINK_UP) {
3532 if (phba->link_state != LPFC_LINK_DOWN) {
3533 phba->fc_stat.LinkDown++;
3534 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3535 "1312 Link Down Event x%x received "
3536 "Data: x%x x%x x%x\n",
3537 la->eventTag, phba->fc_eventTag,
3538 phba->pport->port_state, vport->fc_flag);
3539 lpfc_mbx_issue_link_down(phba);
3540 } else
3541 lpfc_enable_la(phba);
3542
3543 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3544 "1310 Menlo Maint Mode Link up Event x%x rcvd "
3545 "Data: x%x x%x x%x\n",
3546 la->eventTag, phba->fc_eventTag,
3547 phba->pport->port_state, vport->fc_flag);
3548
3549
3550
3551
3552
3553 if (phba->wait_4_mlo_maint_flg) {
3554 phba->wait_4_mlo_maint_flg = 0;
3555 wake_up_interruptible(&phba->wait_4_mlo_m_q);
3556 }
3557 }
3558
3559 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3560 bf_get(lpfc_mbx_read_top_fa, la)) {
3561 if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
3562 lpfc_issue_clear_la(phba, vport);
3563 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3564 "1311 fa %d\n",
3565 bf_get(lpfc_mbx_read_top_fa, la));
3566 }
3567
3568lpfc_mbx_cmpl_read_topology_free_mbuf:
3569 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3570 kfree(mp);
3571 mempool_free(pmb, phba->mbox_mem_pool);
3572 return;
3573}
3574
3575
3576
3577
3578
3579
3580
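/**
 * lpfc_mbx_cmpl_reg_login - Completion handler for the REG_LOGIN mailbox
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the REG_LOGIN mailbox object.
 *
 * Feed the completion into the node discovery state machine, unregistering
 * the RPI first when the completion arrived out of order or must be
 * ignored.
 **/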
3581void
3582lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3583{
3584 struct lpfc_vport *vport = pmb->vport;
3585 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3586 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3587
3588 pmb->ctx_buf = NULL;
3589 pmb->ctx_ndlp = NULL;
3590
3591 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY,
3592 "0002 rpi:%x DID:%x flg:%x %d x%px\n",
3593 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3594 kref_read(&ndlp->kref),
3595 ndlp);
3596 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3597 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3598
3599 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3600 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3601
3602
3603
3604
3605
3606
3607
3608
3609 spin_lock_irq(&ndlp->lock);
3610 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3611 spin_unlock_irq(&ndlp->lock);
3612
3613
3614
3615
3616
3617
3618 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3619 lpfc_unreg_rpi(vport, ndlp);
3620 }
3621
3622
3623 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
3624
3625 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3626 kfree(mp);
3627 mempool_free(pmb, phba->mbox_mem_pool);
3628
3629
3630
3631 lpfc_nlp_put(ndlp);
3632
3633 return;
3634}
3635
3636static void
3637lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3638{
3639 MAILBOX_t *mb = &pmb->u.mb;
3640 struct lpfc_vport *vport = pmb->vport;
3641 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3642
3643 switch (mb->mbxStatus) {
3644 case 0x0011:
3645 case 0x0020:
3646 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3647 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3648 mb->mbxStatus);
3649 break;
3650
3651 case 0x9700:
3652 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3653 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3654 vport->vpi, mb->mbxStatus);
3655 if (!(phba->pport->load_flag & FC_UNLOADING))
3656 lpfc_workq_post_event(phba, NULL, NULL,
3657 LPFC_EVT_RESET_HBA);
3658 }
3659 spin_lock_irq(shost->host_lock);
3660 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
3661 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3662 spin_unlock_irq(shost->host_lock);
3663 vport->unreg_vpi_cmpl = VPORT_OK;
3664 mempool_free(pmb, phba->mbox_mem_pool);
3665 lpfc_cleanup_vports_rrqs(vport, NULL);
3666
3667
3668
3669
3670 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
3671 scsi_host_put(shost);
3672}
3673
3674int
3675lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3676{
3677 struct lpfc_hba *phba = vport->phba;
3678 LPFC_MBOXQ_t *mbox;
3679 int rc;
3680
3681 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3682 if (!mbox)
3683 return 1;
3684
3685 lpfc_unreg_vpi(phba, vport->vpi, mbox);
3686 mbox->vport = vport;
3687 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
3688 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3689 if (rc == MBX_NOT_FINISHED) {
3690 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3691 "1800 Could not issue unreg_vpi\n");
3692 mempool_free(mbox, phba->mbox_mem_pool);
3693 vport->unreg_vpi_cmpl = VPORT_ERROR;
3694 return rc;
3695 }
3696 return 0;
3697}
3698
3699static void
3700lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3701{
3702 struct lpfc_vport *vport = pmb->vport;
3703 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3704 MAILBOX_t *mb = &pmb->u.mb;
3705
3706 switch (mb->mbxStatus) {
3707 case 0x0011:
3708 case 0x9601:
3709 case 0x9602:
3710 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3711 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3712 mb->mbxStatus);
3713 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3714 spin_lock_irq(shost->host_lock);
3715 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3716 spin_unlock_irq(shost->host_lock);
3717 vport->fc_myDID = 0;
3718
3719 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3720 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
3721 if (phba->nvmet_support)
3722 lpfc_nvmet_update_targetport(phba);
3723 else
3724 lpfc_nvme_update_localport(vport);
3725 }
3726 goto out;
3727 }
3728
3729 spin_lock_irq(shost->host_lock);
3730 vport->vpi_state |= LPFC_VPI_REGISTERED;
3731 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3732 spin_unlock_irq(shost->host_lock);
3733 vport->num_disc_nodes = 0;
3734
3735 if (vport->fc_npr_cnt)
3736 lpfc_els_disc_plogi(vport);
3737
3738 if (!vport->num_disc_nodes) {
3739 spin_lock_irq(shost->host_lock);
3740 vport->fc_flag &= ~FC_NDISC_ACTIVE;
3741 spin_unlock_irq(shost->host_lock);
3742 lpfc_can_disctmo(vport);
3743 }
3744 vport->port_state = LPFC_VPORT_READY;
3745
3746out:
3747 mempool_free(pmb, phba->mbox_mem_pool);
3748 return;
3749}
3750
3751
3752
3753
3754
3755
3756
3757
3758
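/**
 * lpfc_create_static_vport - Read HBA config region and create static vports
 * @phba: pointer to lpfc hba data structure.
 *
 * Read the static vport information from the HBA via DUMP mailbox
 * commands, validate its signature and revision, and create an NPIV vport
 * for every valid WWPN/WWNN pair found.
 **/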
3759void
3760lpfc_create_static_vport(struct lpfc_hba *phba)
3761{
3762 LPFC_MBOXQ_t *pmb = NULL;
3763 MAILBOX_t *mb;
3764 struct static_vport_info *vport_info;
3765 int mbx_wait_rc = 0, i;
3766 struct fc_vport_identifiers vport_id;
3767 struct fc_vport *new_fc_vport;
3768 struct Scsi_Host *shost;
3769 struct lpfc_vport *vport;
3770 uint16_t offset = 0;
3771 uint8_t *vport_buff;
3772 struct lpfc_dmabuf *mp;
3773 uint32_t byte_count = 0;
3774
3775 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3776 if (!pmb) {
3777 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3778 "0542 lpfc_create_static_vport failed to"
3779 " allocate mailbox memory\n");
3780 return;
3781 }
3782 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
3783 mb = &pmb->u.mb;
3784
3785 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
3786 if (!vport_info) {
3787 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3788 "0543 lpfc_create_static_vport failed to"
3789 " allocate vport_info\n");
3790 mempool_free(pmb, phba->mbox_mem_pool);
3791 return;
3792 }
3793
3794 vport_buff = (uint8_t *) vport_info;
3795 do {
3796
3797 if (pmb->ctx_buf) {
3798 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3799 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3800 kfree(mp);
3801 }
3802 if (lpfc_dump_static_vport(phba, pmb, offset))
3803 goto out;
3804
3805 pmb->vport = phba->pport;
3806 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
3807 LPFC_MBOX_TMO);
3808
3809 if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
3810 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3811 "0544 lpfc_create_static_vport failed to"
3812 " issue dump mailbox command ret 0x%x "
3813 "status 0x%x\n",
3814 mbx_wait_rc, mb->mbxStatus);
3815 goto out;
3816 }
3817
3818 if (phba->sli_rev == LPFC_SLI_REV4) {
3819 byte_count = pmb->u.mqe.un.mb_words[5];
3820 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3821 if (byte_count > sizeof(struct static_vport_info) -
3822 offset)
3823 byte_count = sizeof(struct static_vport_info)
3824 - offset;
3825 memcpy(vport_buff + offset, mp->virt, byte_count);
3826 offset += byte_count;
3827 } else {
3828 if (mb->un.varDmp.word_cnt >
3829 sizeof(struct static_vport_info) - offset)
3830 mb->un.varDmp.word_cnt =
3831 sizeof(struct static_vport_info)
3832 - offset;
3833 byte_count = mb->un.varDmp.word_cnt;
3834 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
3835 vport_buff + offset,
3836 byte_count);
3837
3838 offset += byte_count;
3839 }
3840
3841 } while (byte_count &&
3842 offset < sizeof(struct static_vport_info));
3843
3844
3845 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
3846 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
3847 != VPORT_INFO_REV)) {
3848 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3849 "0545 lpfc_create_static_vport bad"
3850 " information header 0x%x 0x%x\n",
3851 le32_to_cpu(vport_info->signature),
3852 le32_to_cpu(vport_info->rev) &
3853 VPORT_INFO_REV_MASK);
3854
3855 goto out;
3856 }
3857
3858 shost = lpfc_shost_from_vport(phba->pport);
3859
3860 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
3861 memset(&vport_id, 0, sizeof(vport_id));
3862 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
3863 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
3864 if (!vport_id.port_name || !vport_id.node_name)
3865 continue;
3866
3867 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
3868 vport_id.vport_type = FC_PORTTYPE_NPIV;
3869 vport_id.disable = false;
3870 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
3871
3872 if (!new_fc_vport) {
3873 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3874 "0546 lpfc_create_static_vport failed to"
3875 " create vport\n");
3876 continue;
3877 }
3878
3879 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
3880 vport->vport_flag |= STATIC_VPORT;
3881 }
3882
3883out:
3884 kfree(vport_info);
3885 if (mbx_wait_rc != MBX_TIMEOUT) {
3886 if (pmb->ctx_buf) {
3887 mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
3888 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3889 kfree(mp);
3890 }
3891 mempool_free(pmb, phba->mbox_mem_pool);
3892 }
3893
3894 return;
3895}
3896
3897
3898
3899
3900
3901
3902
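/**
 * lpfc_mbx_cmpl_fabric_reg_login - Completion handler for fabric REG_LOGIN
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the REG_LOGIN mailbox object.
 *
 * On success, mark the fabric node registered and unmapped, start FDISCs
 * for the vports, and kick off the SCR/NameServer PLOGI sequence. On
 * failure, fall back to loop discovery or mark the vport failed.
 **/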
3903void
3904lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3905{
3906 struct lpfc_vport *vport = pmb->vport;
3907 MAILBOX_t *mb = &pmb->u.mb;
3908 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
3909 struct lpfc_nodelist *ndlp;
3910 struct Scsi_Host *shost;
3911
3912 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
3913 pmb->ctx_ndlp = NULL;
3914 pmb->ctx_buf = NULL;
3915
3916 if (mb->mbxStatus) {
3917 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3918 "0258 Register Fabric login error: 0x%x\n",
3919 mb->mbxStatus);
3920 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3921 kfree(mp);
3922 mempool_free(pmb, phba->mbox_mem_pool);
3923
3924 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3925
3926 lpfc_disc_list_loopmap(vport);
3927
3928
3929 lpfc_disc_start(vport);
3930
3931
3932
3933 lpfc_nlp_put(ndlp);
3934 return;
3935 }
3936
3937 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3938
3939
3940
3941 lpfc_nlp_put(ndlp);
3942 return;
3943 }
3944
3945 if (phba->sli_rev < LPFC_SLI_REV4)
3946 ndlp->nlp_rpi = mb->un.varWords[0];
3947 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3948 ndlp->nlp_type |= NLP_FABRIC;
3949 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3950
3951 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3952
3953
3954 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
3955 lpfc_start_fdiscs(phba);
3956 else {
3957 shost = lpfc_shost_from_vport(vport);
3958 spin_lock_irq(shost->host_lock);
			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
3960 spin_unlock_irq(shost->host_lock);
3961 }
3962 lpfc_do_scr_ns_plogi(phba, vport);
3963 }
3964
3965 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3966 kfree(mp);
3967 mempool_free(pmb, phba->mbox_mem_pool);
3968
3969
3970
3971
3972 lpfc_nlp_put(ndlp);
3973 return;
3974}
3975
3976
3977
3978
3979
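/**
 * lpfc_issue_gidft - Issue a GID_FT to the NameServer for each FC4 type
 * @vport: pointer to lpfc vport data structure.
 *
 * Issue a GID_FT for FCP and/or NVME depending on the enabled FC4 types.
 * Returns the number of outstanding GID_FTs (vport->gidft_inp), or 0 when
 * none could be issued.
 **/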
3980int
3981lpfc_issue_gidft(struct lpfc_vport *vport)
3982{
3983
3984 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
3985 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
3986 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
3987
3988
3989
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0604 Failed to issue GID_FT to "
					 "FC TYPE %x. Finishing discovery.\n",
					 FC_TYPE_FCP);
3996 return 0;
3997 }
3998 vport->gidft_inp++;
3999 }
4000
4001 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4002 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
4003 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
4004
4005
4006
			lpfc_printf_vlog(vport, KERN_ERR,
					 LOG_TRACE_EVENT,
					 "0605 Failed to issue GID_FT to "
					 "FC_TYPE %x. Finishing discovery: "
					 "gidftinp %d\n",
					 FC_TYPE_NVME,
					 vport->gidft_inp);
4014 if (vport->gidft_inp == 0)
4015 return 0;
4016 } else
4017 vport->gidft_inp++;
4018 }
4019 return vport->gidft_inp;
4020}
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
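/**
 * lpfc_issue_gidpt - Issue a GID_PT to the NameServer for all N_Ports
 * @vport: pointer to lpfc vport data structure.
 *
 * Returns 1 if the GID_PT was issued and discovery is outstanding,
 * otherwise 0.
 **/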
4032int
4033lpfc_issue_gidpt(struct lpfc_vport *vport)
4034{
4035
4036 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
4037
4038
4039
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0606 Failed to issue GID_PT to "
				 "Port TYPE %x. Finishing discovery.\n",
				 GID_PT_N_PORT);
4045 return 0;
4046 }
4047 vport->gidft_inp++;
4048 return 1;
4049}
4050
4051
4052
4053
4054
4055
4056
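/**
 * lpfc_mbx_cmpl_ns_reg_login - Completion handler for NameServer REG_LOGIN
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the REG_LOGIN mailbox object.
 *
 * On success, register the supported FC4 types and port objects with the
 * NameServer, issue SCR (and RDF where applicable), and start GID_FT
 * discovery. On failure, fall back to loop discovery or mark the vport
 * failed.
 **/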
4057void
4058lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4059{
4060 MAILBOX_t *mb = &pmb->u.mb;
4061 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
4062 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4063 struct lpfc_vport *vport = pmb->vport;
4064
4065 pmb->ctx_buf = NULL;
4066 pmb->ctx_ndlp = NULL;
4067 vport->gidft_inp = 0;
4068
4069 if (mb->mbxStatus) {
4070 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4071 "0260 Register NameServer error: 0x%x\n",
4072 mb->mbxStatus);
4073
4074out:
4075
4076
4077
4078 lpfc_nlp_put(ndlp);
4079 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4080 kfree(mp);
4081 mempool_free(pmb, phba->mbox_mem_pool);
4082
4083
4084
4085
4086
4087 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
4088 spin_lock_irq(&ndlp->lock);
4089 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
4090 spin_unlock_irq(&ndlp->lock);
4091 lpfc_nlp_not_used(ndlp);
4092 }
4093
4094 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
4095
4096
4097
4098
4099 lpfc_disc_list_loopmap(vport);
4100
4101
4102 lpfc_disc_start(vport);
4103 return;
4104 }
4105 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
4106 return;
4107 }
4108
4109 if (phba->sli_rev < LPFC_SLI_REV4)
4110 ndlp->nlp_rpi = mb->un.varWords[0];
4111 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
4112 ndlp->nlp_type |= NLP_FABRIC;
4113 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4114 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
4115 "0003 rpi:%x DID:%x flg:%x %d x%px\n",
4116 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4117 kref_read(&ndlp->kref),
4118 ndlp);
4119
4120 if (vport->port_state < LPFC_VPORT_READY) {
4121
4122 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
4123 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
4124 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
4125 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
4126
4127 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4128 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
4129 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
4130
4131 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
4132 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
4133 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
4134 FC_TYPE_NVME);
4135
4136
4137 lpfc_issue_els_scr(vport, 0);
4138
4139 if (!phba->cfg_enable_mi ||
4140 phba->sli4_hba.pc_sli4_params.mi_ver < LPFC_MIB3_SUPPORT)
4141 lpfc_issue_els_rdf(vport, 0);
4142 }
4143
4144 vport->fc_ns_retry = 0;
4145 if (lpfc_issue_gidft(vport) == 0)
4146 goto out;
4147
4148
4149
4150
4151
4152
4153
4154
4155 lpfc_nlp_put(ndlp);
4156 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4157 kfree(mp);
4158 mempool_free(pmb, phba->mbox_mem_pool);
4159
4160 return;
4161}
4162
4163static void
4164lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4165{
4166 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4167 struct fc_rport *rport;
4168 struct lpfc_rport_data *rdata;
4169 struct fc_rport_identifiers rport_ids;
4170 struct lpfc_hba *phba = vport->phba;
4171 unsigned long flags;
4172
4173 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4174 return;
4175
4176
4177 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
4178 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
4179 rport_ids.port_id = ndlp->nlp_DID;
4180 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
4181
4182
4183 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4184 "rport add: did:x%x flg:x%x type x%x",
4185 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4186
4187
4188 if (vport->load_flag & FC_UNLOADING)
4189 return;
4190
4191
4192
4193
4194 if (ndlp->rport) {
4195 rdata = ndlp->rport->dd_data;
4196 rdata->pnode = NULL;
4197 }
4198
4199 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
4200 if (!rport) {
4201 dev_printk(KERN_WARNING, &phba->pcidev->dev,
4202 "Warning: fc_remote_port_add failed\n");
4203 return;
4204 }
4205
4206
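	/* Successful port add. Complete initializing node data. */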
4207 rport->maxframe_size = ndlp->nlp_maxframe;
4208 rport->supported_classes = ndlp->nlp_class_sup;
4209 rdata = rport->dd_data;
4210 rdata->pnode = lpfc_nlp_get(ndlp);
4211 if (!rdata->pnode) {
4212 dev_warn(&phba->pcidev->dev,
4213 "Warning - node ref failed. Unreg rport\n");
4214 fc_remote_port_delete(rport);
4215 ndlp->rport = NULL;
4216 return;
4217 }
4218
4219 spin_lock_irqsave(&ndlp->lock, flags);
4220 ndlp->fc4_xpt_flags |= SCSI_XPT_REGD;
4221 spin_unlock_irqrestore(&ndlp->lock, flags);
4222
4223 if (ndlp->nlp_type & NLP_FCP_TARGET)
4224 rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
4225 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
4226 rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
4227 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
4228 rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
4229 if (ndlp->nlp_type & NLP_NVME_TARGET)
4230 rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
4231 if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
4232 rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
4233
4234 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
4235 fc_remote_port_rolechg(rport, rport_ids.roles);
4236
4237 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4238 "3183 %s rport x%px DID x%x, role x%x\n",
4239 __func__, rport, rport->port_id, rport->roles);
4240
4241 if ((rport->scsi_target_id != -1) &&
4242 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
4243 ndlp->nlp_sid = rport->scsi_target_id;
4244 }
4245
4246 return;
4247}
4248
4249static void
4250lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
4251{
4252 struct fc_rport *rport = ndlp->rport;
4253 struct lpfc_vport *vport = ndlp->vport;
4254
4255 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
4256 return;
4257
4258 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
4259 "rport delete: did:x%x flg:x%x type x%x",
4260 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4261
4262 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4263 "3184 rport unregister x%06x, rport x%px "
4264 "xptflg x%x\n",
4265 ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags);
4266
4267 fc_remote_port_delete(rport);
4268 lpfc_nlp_put(ndlp);
4269}
4270
4271static void
4272lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
4273{
4274 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4275 unsigned long iflags;
4276
4277 spin_lock_irqsave(shost->host_lock, iflags);
4278 switch (state) {
4279 case NLP_STE_UNUSED_NODE:
4280 vport->fc_unused_cnt += count;
4281 break;
4282 case NLP_STE_PLOGI_ISSUE:
4283 vport->fc_plogi_cnt += count;
4284 break;
4285 case NLP_STE_ADISC_ISSUE:
4286 vport->fc_adisc_cnt += count;
4287 break;
4288 case NLP_STE_REG_LOGIN_ISSUE:
4289 vport->fc_reglogin_cnt += count;
4290 break;
4291 case NLP_STE_PRLI_ISSUE:
4292 vport->fc_prli_cnt += count;
4293 break;
4294 case NLP_STE_UNMAPPED_NODE:
4295 vport->fc_unmap_cnt += count;
4296 break;
4297 case NLP_STE_MAPPED_NODE:
4298 vport->fc_map_cnt += count;
4299 break;
4300 case NLP_STE_NPR_NODE:
4301 if (vport->fc_npr_cnt == 0 && count == -1)
4302 vport->fc_npr_cnt = 0;
4303 else
4304 vport->fc_npr_cnt += count;
4305 break;
4306 }
4307 spin_unlock_irqrestore(shost->host_lock, iflags);
4308}
4309
4310static void
4311lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4312 int old_state, int new_state)
4313{
4314 if (new_state == NLP_STE_UNMAPPED_NODE) {
4315 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4316 ndlp->nlp_type |= NLP_FC_NODE;
4317 }
4318 if (new_state == NLP_STE_MAPPED_NODE)
4319 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
4320 if (new_state == NLP_STE_NPR_NODE)
4321 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
4322
4323
4324 if ((old_state == NLP_STE_MAPPED_NODE ||
4325 old_state == NLP_STE_UNMAPPED_NODE)) {
4326 if (ndlp->rport &&
4327 lpfc_valid_xpt_node(ndlp)) {
4328 vport->phba->nport_event_cnt++;
4329 lpfc_unregister_remote_port(ndlp);
4330 }
4331
4332 if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4333 vport->phba->nport_event_cnt++;
4334 if (vport->phba->nvmet_support == 0) {
4335
4336 if (ndlp->nlp_type & NLP_NVME_TARGET)
4337 lpfc_nvme_unregister_port(vport, ndlp);
4338 } else {
4339
4340 lpfc_nlp_put(ndlp);
4341 }
4342 }
4343 }
4344
4345
4346
4347 if (new_state == NLP_STE_MAPPED_NODE ||
4348 new_state == NLP_STE_UNMAPPED_NODE) {
4349 if (lpfc_valid_xpt_node(ndlp)) {
4350 vport->phba->nport_event_cnt++;
4351
4352
4353
4354
4355 lpfc_register_remote_port(vport, ndlp);
4356 }
4357
4358 if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
4359 ndlp->nlp_fc4_type & NLP_FC4_NVME) {
4360 if (vport->phba->nvmet_support == 0) {
4361
4362
4363
4364
4365 if (ndlp->nlp_type & NLP_NVME_TARGET) {
4366 vport->phba->nport_event_cnt++;
4367 lpfc_nvme_register_port(vport, ndlp);
4368 }
4369 } else {
4370
4371
4372
4373 lpfc_nlp_get(ndlp);
4374 }
4375 }
4376 }
4377
4378 if ((new_state == NLP_STE_MAPPED_NODE) &&
4379 (vport->stat_data_enabled)) {
4380
4381
4382
4383
4384 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
4385 sizeof(struct lpfc_scsicmd_bkt),
4386 GFP_KERNEL);
4387
4388 if (!ndlp->lat_data)
4389 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4390 "0286 lpfc_nlp_state_cleanup failed to "
4391 "allocate statistical data buffer DID "
4392 "0x%x\n", ndlp->nlp_DID);
4393 }
4399
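	/*
	 * If the node just added to the Mapped list was an FCP target,
	 * but the remote port registration failed or assigned a target
	 * id outside the presentable range - move the node to the
	 * Unmapped List.
	 */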
4400 if ((new_state == NLP_STE_MAPPED_NODE) &&
4401 (ndlp->nlp_type & NLP_FCP_TARGET) &&
4402 (!ndlp->rport ||
4403 ndlp->rport->scsi_target_id == -1 ||
4404 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
4405 spin_lock_irq(&ndlp->lock);
4406 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
4407 spin_unlock_irq(&ndlp->lock);
4408 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4409 }
4410}
4411
4412static char *
4413lpfc_nlp_state_name(char *buffer, size_t size, int state)
4414{
4415 static char *states[] = {
4416 [NLP_STE_UNUSED_NODE] = "UNUSED",
4417 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
4418 [NLP_STE_ADISC_ISSUE] = "ADISC",
4419 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
4420 [NLP_STE_PRLI_ISSUE] = "PRLI",
4421 [NLP_STE_LOGO_ISSUE] = "LOGO",
4422 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
4423 [NLP_STE_MAPPED_NODE] = "MAPPED",
4424 [NLP_STE_NPR_NODE] = "NPR",
4425 };
4426
4427 if (state < NLP_STE_MAX_STATE && states[state])
4428 strlcpy(buffer, states[state], size);
4429 else
4430 snprintf(buffer, size, "unknown (%d)", state);
4431 return buffer;
4432}
4433
4434void
4435lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4436 int state)
4437{
4438 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4439 int old_state = ndlp->nlp_state;
4440 int node_dropped = ndlp->nlp_flag & NLP_DROPPED;
4441 char name1[16], name2[16];
4442
4443 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4444 "0904 NPort state transition x%06x, %s -> %s\n",
4445 ndlp->nlp_DID,
4446 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4447 lpfc_nlp_state_name(name2, sizeof(name2), state));
4448
4449 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4450 "node statechg did:x%x old:%d ste:%d",
4451 ndlp->nlp_DID, old_state, state);
4452
4453 if (node_dropped && old_state == NLP_STE_UNUSED_NODE &&
4454 state != NLP_STE_UNUSED_NODE) {
4455 ndlp->nlp_flag &= ~NLP_DROPPED;
4456 lpfc_nlp_get(ndlp);
4457 }
4458
4459 if (old_state == NLP_STE_NPR_NODE &&
4460 state != NLP_STE_NPR_NODE)
4461 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4462 if (old_state == NLP_STE_UNMAPPED_NODE) {
4463 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4464 ndlp->nlp_type &= ~NLP_FC_NODE;
4465 }
4466
4467 if (list_empty(&ndlp->nlp_listp)) {
4468 spin_lock_irq(shost->host_lock);
4469 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4470 spin_unlock_irq(shost->host_lock);
4471 } else if (old_state)
4472 lpfc_nlp_counters(vport, old_state, -1);
4473
4474 ndlp->nlp_state = state;
4475 lpfc_nlp_counters(vport, state, 1);
4476 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4477}
4478
4479void
4480lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4481{
4482 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4483
4484 if (list_empty(&ndlp->nlp_listp)) {
4485 spin_lock_irq(shost->host_lock);
4486 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4487 spin_unlock_irq(shost->host_lock);
4488 }
4489}
4490
4491void
4492lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4493{
4494 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4495
4496 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4497 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4498 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4499 spin_lock_irq(shost->host_lock);
4500 list_del_init(&ndlp->nlp_listp);
4501 spin_unlock_irq(shost->host_lock);
4502 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4503 NLP_STE_UNUSED_NODE);
4504}
4505
/**
 * lpfc_initialize_node - Initialize all fields of node object
 * @vport: Pointer to Virtual Port object.
 * @ndlp: Pointer to FC node object.
 * @did: FC_ID of the node.
 *
 * This function is always called when a node object needs to be initialized.
 * It initializes all the fields of the node object. Although the reference
 * to phba from @ndlp can be obtained indirectly through its reference to
 * @vport, a direct reference to phba is taken here by @ndlp. This is due
 * to the fact that the handling of the flag NLP_RELEASE_RPI is dependent
 * on the physical port.
 *
 * Return code
 *		None.
 **/
4520static inline void
4521lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4522 uint32_t did)
4523{
4524 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4525 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4526 timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
4527 INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
4528
4529 ndlp->nlp_DID = did;
4530 ndlp->vport = vport;
4531 ndlp->phba = vport->phba;
4532 ndlp->nlp_sid = NLP_NO_SID;
4533 ndlp->nlp_fc4_type = NLP_FC4_NONE;
4534 kref_init(&ndlp->kref);
4535 atomic_set(&ndlp->cmd_pending, 0);
4536 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4537 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4538}
4539
4540void
4541lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4542{
	/* Use of lpfc_drop_node and the UNUSED list: lpfc_drop_node should
	 * be used when the caller wishes to issue the "last" lpfc_nlp_put()
	 * to remove the ndlp from the vport. The ndlp is marked NLP_DROPPED
	 * and held in the UNUSED state until all other outstanding
	 * references complete. Bail out if the node is already UNUSED.
	 */
4550 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4551 return;
4552 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4553 ndlp->nlp_flag |= NLP_DROPPED;
4554 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4555 lpfc_cleanup_vports_rrqs(vport, ndlp);
4556 lpfc_unreg_rpi(vport, ndlp);
4557 }
4558
4559 lpfc_nlp_put(ndlp);
4560 return;
4561}
4562
/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
4566void
4567lpfc_set_disctmo(struct lpfc_vport *vport)
4568{
4569 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4570 struct lpfc_hba *phba = vport->phba;
4571 uint32_t tmo;
4572
4573 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
4574
4575 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
4576 } else {
4577
4578
4579
4580 tmo = ((phba->fc_ratov * 3) + 3);
4581 }
4582
4583
4584 if (!timer_pending(&vport->fc_disctmo)) {
4585 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4586 "set disc timer: tmo:x%x state:x%x flg:x%x",
4587 tmo, vport->port_state, vport->fc_flag);
4588 }
4589
4590 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
4591 spin_lock_irq(shost->host_lock);
4592 vport->fc_flag |= FC_DISC_TMO;
4593 spin_unlock_irq(shost->host_lock);
4594
4595
4596 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4597 "0247 Start Discovery Timer state x%x "
4598 "Data: x%x x%lx x%x x%x\n",
4599 vport->port_state, tmo,
4600 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
4601 vport->fc_adisc_cnt);
4602
4603 return;
4604}
4605
/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
4609int
4610lpfc_can_disctmo(struct lpfc_vport *vport)
4611{
4612 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4613 unsigned long iflags;
4614
4615 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4616 "can disc timer: state:x%x rtry:x%x flg:x%x",
4617 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
4618
4619
4620 if (vport->fc_flag & FC_DISC_TMO) {
4621 spin_lock_irqsave(shost->host_lock, iflags);
4622 vport->fc_flag &= ~FC_DISC_TMO;
4623 spin_unlock_irqrestore(shost->host_lock, iflags);
4624 del_timer_sync(&vport->fc_disctmo);
4625 spin_lock_irqsave(&vport->work_port_lock, iflags);
4626 vport->work_port_events &= ~WORKER_DISC_TMO;
4627 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
4628 }
4629
4630
4631 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4632 "0248 Cancel Discovery Timer state x%x "
4633 "Data: x%x x%x x%x\n",
4634 vport->port_state, vport->fc_flag,
4635 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
4636 return 0;
4637}
4638
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
4643int
4644lpfc_check_sli_ndlp(struct lpfc_hba *phba,
4645 struct lpfc_sli_ring *pring,
4646 struct lpfc_iocbq *iocb,
4647 struct lpfc_nodelist *ndlp)
4648{
4649 IOCB_t *icmd = &iocb->iocb;
4650 struct lpfc_vport *vport = ndlp->vport;
4651
4652 if (iocb->vport != vport)
4653 return 0;
4654
4655 if (pring->ringno == LPFC_ELS_RING) {
4656 switch (icmd->ulpCommand) {
4657 case CMD_GEN_REQUEST64_CR:
4658 if (iocb->context_un.ndlp == ndlp)
4659 return 1;
4660 fallthrough;
4661 case CMD_ELS_REQUEST64_CR:
4662 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
4663 return 1;
4664 fallthrough;
4665 case CMD_XMIT_ELS_RSP64_CX:
4666 if (iocb->context1 == (uint8_t *) ndlp)
4667 return 1;
4668 }
4669 } else if (pring->ringno == LPFC_FCP_RING) {
4670
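		/* Skip match check if waiting to relogin to FCP target */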
4671 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4672 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
4673 return 0;
4674 }
4675 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
4676 return 1;
4677 }
4678 }
4679 return 0;
4680}
4681
4682static void
4683__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
4684 struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
4685 struct list_head *dequeue_list)
4686{
4687 struct lpfc_iocbq *iocb, *next_iocb;
4688
4689 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
4690
4691 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
4692
4693 list_move_tail(&iocb->list, dequeue_list);
4694 }
4695}
4696
4697static void
4698lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
4699 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4700{
4701 struct lpfc_sli *psli = &phba->sli;
4702 uint32_t i;
4703
4704 spin_lock_irq(&phba->hbalock);
4705 for (i = 0; i < psli->num_rings; i++)
4706 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
4707 dequeue_list);
4708 spin_unlock_irq(&phba->hbalock);
4709}
4710
4711static void
4712lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
4713 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
4714{
4715 struct lpfc_sli_ring *pring;
4716 struct lpfc_queue *qp = NULL;
4717
4718 spin_lock_irq(&phba->hbalock);
4719 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
4720 pring = qp->pring;
4721 if (!pring)
4722 continue;
4723 spin_lock(&pring->ring_lock);
4724 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
4725 spin_unlock(&pring->ring_lock);
4726 }
4727 spin_unlock_irq(&phba->hbalock);
4728}
4729
/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
4734static int
4735lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4736{
4737 LIST_HEAD(completions);
4738
4739 lpfc_fabric_abort_nport(ndlp);
4740
4741
4742
4743
4744
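	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */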
4745 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4746 if (phba->sli_rev != LPFC_SLI_REV4)
4747 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
4748 else
4749 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
4750 }
4751
4752
4753 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4754 IOERR_SLI_ABORTED);
4755
4756 return 0;
4757}
4758
/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the UNREG_RPI.
 **/
4767static void
4768lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4769{
4770 struct lpfc_vport *vport = pmb->vport;
4771 struct lpfc_nodelist *ndlp;
4772
4773 ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
4774 if (!ndlp)
4775 return;
4776 lpfc_issue_els_logo(vport, ndlp, 0);
4777 mempool_free(pmb, phba->mbox_mem_pool);
4778
4779
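	/* Check to see if there are any deferred events to process */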
4780 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
4781 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
4782 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4783 "1434 UNREG cmpl deferred logo x%x "
4784 "on NPort x%x Data: x%x x%px\n",
4785 ndlp->nlp_rpi, ndlp->nlp_DID,
4786 ndlp->nlp_defer_did, ndlp);
4787
4788 ndlp->nlp_flag &= ~NLP_UNREG_INP;
4789 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
4790 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4791 } else {
4792 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
4793 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
4794 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
4795 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
4796 }
4797 ndlp->nlp_flag &= ~NLP_UNREG_INP;
4798 }
4799}
4800
/*
 * Sets the mailbox completion handler to be used for the
 * unreg_rpi command. The handler varies based on the state of
 * the port and what will be happening to the rpi next.
 */
4806static void
4807lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
4808 struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
4809{
4810 unsigned long iflags;
4811
4812
4813
4814
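	/* Driver always gets a reference on the mailbox job
	 * in support of async jobs.
	 */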
4815 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
4816 if (!mbox->ctx_ndlp)
4817 return;
4818
4819 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
4820 mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
4821
4822 } else if (phba->sli_rev == LPFC_SLI_REV4 &&
4823 (!(vport->load_flag & FC_UNLOADING)) &&
4824 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
4825 LPFC_SLI_INTF_IF_TYPE_2) &&
4826 (kref_read(&ndlp->kref) > 0)) {
4827 mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
4828 } else {
4829 if (vport->load_flag & FC_UNLOADING) {
4830 if (phba->sli_rev == LPFC_SLI_REV4) {
4831 spin_lock_irqsave(&ndlp->lock, iflags);
4832 ndlp->nlp_flag |= NLP_RELEASE_RPI;
4833 spin_unlock_irqrestore(&ndlp->lock, iflags);
4834 }
4835 }
4836 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4837 }
4838}
4839
/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
4849int
4850lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4851{
4852 struct lpfc_hba *phba = vport->phba;
4853 LPFC_MBOXQ_t *mbox;
4854 int rc, acc_plogi = 1;
4855 uint16_t rpi;
4856
4857 if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4858 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
4859 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
4860 lpfc_printf_vlog(vport, KERN_INFO,
4861 LOG_NODE | LOG_DISCOVERY,
4862 "3366 RPI x%x needs to be "
4863 "unregistered nlp_flag x%x "
4864 "did x%x\n",
4865 ndlp->nlp_rpi, ndlp->nlp_flag,
4866 ndlp->nlp_DID);
4867
4868
4869
4870
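		/* If there is already an UNREG in progress for this ndlp,
		 * no need to queue up another one.
		 */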
4871 if (ndlp->nlp_flag & NLP_UNREG_INP) {
4872 lpfc_printf_vlog(vport, KERN_INFO,
4873 LOG_NODE | LOG_DISCOVERY,
4874 "1436 unreg_rpi SKIP UNREG x%x on "
4875 "NPort x%x deferred x%x flg x%x "
4876 "Data: x%px\n",
4877 ndlp->nlp_rpi, ndlp->nlp_DID,
4878 ndlp->nlp_defer_did,
4879 ndlp->nlp_flag, ndlp);
4880 goto out;
4881 }
4882
4883 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4884 if (mbox) {
4885
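			/* SLI4 ports require the physical rpi value. */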
4886 rpi = ndlp->nlp_rpi;
4887 if (phba->sli_rev == LPFC_SLI_REV4)
4888 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4889
4890 lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
4891 mbox->vport = vport;
4892 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
4893 if (!mbox->ctx_ndlp) {
4894 mempool_free(mbox, phba->mbox_mem_pool);
4895 return 1;
4896 }
4897
4898 if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
4899
4900
4901
4902 acc_plogi = 0;
4903 if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
4904 Fabric_DID_MASK) &&
4905 (!(vport->fc_flag & FC_OFFLINE_MODE)))
4906 ndlp->nlp_flag |= NLP_UNREG_INP;
4907
4908 lpfc_printf_vlog(vport, KERN_INFO,
4909 LOG_NODE | LOG_DISCOVERY,
4910 "1433 unreg_rpi UNREG x%x on "
4911 "NPort x%x deferred flg x%x "
4912 "Data:x%px\n",
4913 ndlp->nlp_rpi, ndlp->nlp_DID,
4914 ndlp->nlp_flag, ndlp);
4915
4916 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4917 if (rc == MBX_NOT_FINISHED) {
4918 mempool_free(mbox, phba->mbox_mem_pool);
4919 acc_plogi = 1;
4920 }
4921 } else {
4922 lpfc_printf_vlog(vport, KERN_INFO,
4923 LOG_NODE | LOG_DISCOVERY,
4924 "1444 Failed to allocate mempool "
4925 "unreg_rpi UNREG x%x, "
4926 "DID x%x, flag x%x, "
4927 "ndlp x%px\n",
4928 ndlp->nlp_rpi, ndlp->nlp_DID,
4929 ndlp->nlp_flag, ndlp);
4934
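			/* Because mempool_alloc failed, we
			 * will issue a LOGO here and keep the rpi alive if
			 * not unloading.
			 */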
4935 if (!(vport->load_flag & FC_UNLOADING)) {
4936 ndlp->nlp_flag &= ~NLP_UNREG_INP;
4937 lpfc_issue_els_logo(vport, ndlp, 0);
4938 ndlp->nlp_prev_state = ndlp->nlp_state;
4939 lpfc_nlp_set_state(vport, ndlp,
4940 NLP_STE_NPR_NODE);
4941 }
4942
4943 return 1;
4944 }
4945 lpfc_no_rpi(phba, ndlp);
4946out:
4947 if (phba->sli_rev != LPFC_SLI_REV4)
4948 ndlp->nlp_rpi = 0;
4949 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
4950 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4951 if (acc_plogi)
4952 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4953 return 1;
4954 }
4955 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4956 return 0;
4957}
4958
/*
 * Unregister all currently registered RPIs on all vports
 * of this HBA.
 */
4966void
4967lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
4968{
4969 struct lpfc_vport **vports;
4970 struct lpfc_nodelist *ndlp;
4971 struct Scsi_Host *shost;
4972 int i;
4973
4974 vports = lpfc_create_vport_work_array(phba);
4975 if (!vports) {
4976 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2884 Vport array allocation failed\n");
4978 return;
4979 }
4980 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
4981 shost = lpfc_shost_from_vport(vports[i]);
4982 spin_lock_irq(shost->host_lock);
4983 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
4984 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
4985
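				/* The mempool_alloc might sleep */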
4986 spin_unlock_irq(shost->host_lock);
4987 lpfc_unreg_rpi(vports[i], ndlp);
4988 spin_lock_irq(shost->host_lock);
4989 }
4990 }
4991 spin_unlock_irq(shost->host_lock);
4992 }
4993 lpfc_destroy_vport_work_array(phba, vports);
4994}
4995
4996void
4997lpfc_unreg_all_rpis(struct lpfc_vport *vport)
4998{
4999 struct lpfc_hba *phba = vport->phba;
5000 LPFC_MBOXQ_t *mbox;
5001 int rc;
5002
5003 if (phba->sli_rev == LPFC_SLI_REV4) {
5004 lpfc_sli4_unreg_all_rpis(vport);
5005 return;
5006 }
5007
5008 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5009 if (mbox) {
5010 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
5011 mbox);
5012 mbox->vport = vport;
5013 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5014 mbox->ctx_ndlp = NULL;
5015 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
5016 if (rc != MBX_TIMEOUT)
5017 mempool_free(mbox, phba->mbox_mem_pool);
5018
5019 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5020 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5021 "1836 Could not issue "
5022 "unreg_login(all_rpis) status %d\n",
5023 rc);
5024 }
5025}
5026
5027void
5028lpfc_unreg_default_rpis(struct lpfc_vport *vport)
5029{
5030 struct lpfc_hba *phba = vport->phba;
5031 LPFC_MBOXQ_t *mbox;
5032 int rc;
5033
5034
5035 if (phba->sli_rev > LPFC_SLI_REV3)
5036 return;
5037
5038 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5039 if (mbox) {
5040 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
5041 mbox);
5042 mbox->vport = vport;
5043 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5044 mbox->ctx_ndlp = NULL;
5045 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
5046 if (rc != MBX_TIMEOUT)
5047 mempool_free(mbox, phba->mbox_mem_pool);
5048
5049 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
5050 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5051 "1815 Could not issue "
5052 "unreg_did (default rpis) status %d\n",
5053 rc);
5054 }
5055}
5056
/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
5061static int
5062lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5063{
5064 struct lpfc_hba *phba = vport->phba;
5065 LPFC_MBOXQ_t *mb, *nextmb;
5066 struct lpfc_dmabuf *mp;
5067
5068
5069 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5070 "0900 Cleanup node for NPort x%x "
5071 "Data: x%x x%x x%x\n",
5072 ndlp->nlp_DID, ndlp->nlp_flag,
5073 ndlp->nlp_state, ndlp->nlp_rpi);
5074 lpfc_dequeue_node(vport, ndlp);
5075
5076
5077
5078
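	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */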
	mb = phba->sli.mbox_active;
	if (mb) {
5080 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
5081 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
5082 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5083 mb->ctx_ndlp = NULL;
5084 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5085 }
5086 }
5087
5088 spin_lock_irq(&phba->hbalock);
5089
5090 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
5091 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
5092 (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
5093 (ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
5094 continue;
5095
5096 mb->ctx_ndlp = NULL;
5097 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5098 }
5099
5100 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
5101 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
5102 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
5103 (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
5104 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
5105 if (mp) {
5106 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
5107 kfree(mp);
5108 }
5109 list_del(&mb->list);
5110 mempool_free(mb, phba->mbox_mem_pool);
5111
5112
5113
5114
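			/* We shall not invoke lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */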
5115 }
5116 }
5117 spin_unlock_irq(&phba->hbalock);
5118
5119 lpfc_els_abort(phba, ndlp);
5120
5121 spin_lock_irq(&ndlp->lock);
5122 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
5123 spin_unlock_irq(&ndlp->lock);
5124
5125 ndlp->nlp_last_elscmd = 0;
5126 del_timer_sync(&ndlp->nlp_delayfunc);
5127
5128 list_del_init(&ndlp->els_retry_evt.evt_listp);
5129 list_del_init(&ndlp->dev_loss_evt.evt_listp);
5130 list_del_init(&ndlp->recovery_evt.evt_listp);
5131 lpfc_cleanup_vports_rrqs(vport, ndlp);
5132 if (phba->sli_rev == LPFC_SLI_REV4)
5133 ndlp->nlp_flag |= NLP_RELEASE_RPI;
5134 return 0;
5135}
5136
5137static int
5138lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5139 uint32_t did)
5140{
5141 D_ID mydid, ndlpdid, matchdid;
5142
5143 if (did == Bcast_DID)
5144 return 0;
5145
5146
5147 if (ndlp->nlp_DID == did)
5148 return 1;
5149
5150
5151 mydid.un.word = vport->fc_myDID;
5152 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
5153 return 0;
5154 }
5155
5156 matchdid.un.word = did;
5157 ndlpdid.un.word = ndlp->nlp_DID;
5158 if (matchdid.un.b.id == ndlpdid.un.b.id) {
5159 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
5160 (mydid.un.b.area == matchdid.un.b.area)) {
5168
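			/* This code is supposed to match the ID
			 * for a private loop device that is
			 * connected to fl_port. But we need to
			 * check that the port did not just go
			 * from pt2pt to fabric or we could end
			 * up matching ndlp->nlp_DID 000001 to
			 * fabric DID 0x20101.
			 */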
5169 if ((ndlpdid.un.b.domain == 0) &&
5170 (ndlpdid.un.b.area == 0)) {
5171 if (ndlpdid.un.b.id &&
5172 vport->phba->fc_topology ==
5173 LPFC_TOPOLOGY_LOOP)
5174 return 1;
5175 }
5176 return 0;
5177 }
5178
5179 matchdid.un.word = ndlp->nlp_DID;
5180 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
5181 (mydid.un.b.area == ndlpdid.un.b.area)) {
5182 if ((matchdid.un.b.domain == 0) &&
5183 (matchdid.un.b.area == 0)) {
5184 if (matchdid.un.b.id)
5185 return 1;
5186 }
5187 }
5188 }
5189 return 0;
5190}
5191
5192
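/* Search for a nodelist entry */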
5193static struct lpfc_nodelist *
5194__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5195{
5196 struct lpfc_nodelist *ndlp;
5197 uint32_t data1;
5198
5199 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5200 if (lpfc_matchdid(vport, ndlp, did)) {
5201 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5202 ((uint32_t)ndlp->nlp_xri << 16) |
5203 ((uint32_t)ndlp->nlp_type << 8)
5204 );
5205 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5206 "0929 FIND node DID "
5207 "Data: x%px x%x x%x x%x x%x x%px\n",
5208 ndlp, ndlp->nlp_DID,
5209 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
5210 ndlp->active_rrqs_xri_bitmap);
5211 return ndlp;
5212 }
5213 }
5214
5215
5216 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5217 "0932 FIND node did x%x NOT FOUND.\n", did);
5218 return NULL;
5219}
5220
5221struct lpfc_nodelist *
5222lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
5223{
5224 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5225 struct lpfc_nodelist *ndlp;
5226 unsigned long iflags;
5227
5228 spin_lock_irqsave(shost->host_lock, iflags);
5229 ndlp = __lpfc_findnode_did(vport, did);
5230 spin_unlock_irqrestore(shost->host_lock, iflags);
5231 return ndlp;
5232}
5233
5234struct lpfc_nodelist *
5235lpfc_findnode_mapped(struct lpfc_vport *vport)
5236{
5237 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5238 struct lpfc_nodelist *ndlp;
5239 uint32_t data1;
5240 unsigned long iflags;
5241
5242 spin_lock_irqsave(shost->host_lock, iflags);
5243
5244 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5245 if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
5246 ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
5247 data1 = (((uint32_t)ndlp->nlp_state << 24) |
5248 ((uint32_t)ndlp->nlp_xri << 16) |
5249 ((uint32_t)ndlp->nlp_type << 8) |
5250 ((uint32_t)ndlp->nlp_rpi & 0xff));
5251 spin_unlock_irqrestore(shost->host_lock, iflags);
5252 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5253 "2025 FIND node DID "
5254 "Data: x%px x%x x%x x%x x%px\n",
5255 ndlp, ndlp->nlp_DID,
5256 ndlp->nlp_flag, data1,
5257 ndlp->active_rrqs_xri_bitmap);
5258 return ndlp;
5259 }
5260 }
5261 spin_unlock_irqrestore(shost->host_lock, iflags);
5262
5263
5264 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5265 "2026 FIND mapped did NOT FOUND.\n");
5266 return NULL;
5267}
5268
5269struct lpfc_nodelist *
5270lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
5271{
5272 struct lpfc_nodelist *ndlp;
5273
5274 ndlp = lpfc_findnode_did(vport, did);
5275 if (!ndlp) {
5276 if (vport->phba->nvmet_support)
5277 return NULL;
5278 if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
5279 lpfc_rscn_payload_check(vport, did) == 0)
5280 return NULL;
5281 ndlp = lpfc_nlp_init(vport, did);
5282 if (!ndlp)
5283 return NULL;
5284 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5285
5286 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5287 "6453 Setup New Node 2B_DISC x%x "
5288 "Data:x%x x%x x%x\n",
5289 ndlp->nlp_DID, ndlp->nlp_flag,
5290 ndlp->nlp_state, vport->fc_flag);
5291
5292 spin_lock_irq(&ndlp->lock);
5293 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5294 spin_unlock_irq(&ndlp->lock);
5295 return ndlp;
5296 }
5301
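	/* The NVME Target does not want to actively manage an rport.
	 * The goal is to allow the target to reset its state and clear
	 * pending IO in preparation for the initiator to recover.
	 */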
5302 if ((vport->fc_flag & FC_RSCN_MODE) &&
5303 !(vport->fc_flag & FC_NDISC_ACTIVE)) {
5304 if (lpfc_rscn_payload_check(vport, did)) {
5305
5306
5307
5308
5309 lpfc_cancel_retry_delay_tmo(vport, ndlp);
5310
5311 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5312 "6455 Setup RSCN Node 2B_DISC x%x "
5313 "Data:x%x x%x x%x\n",
5314 ndlp->nlp_DID, ndlp->nlp_flag,
5315 ndlp->nlp_state, vport->fc_flag);
5316
5317
5318
5319
5320
5321
5322 if (vport->phba->nvmet_support)
5323 return ndlp;
5324
5325
5326
5327
5328 if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
5329 !(ndlp->nlp_type &
5330 (NLP_FCP_TARGET | NLP_NVME_TARGET)))
5331 return NULL;
5332
5333 ndlp->nlp_prev_state = ndlp->nlp_state;
5334 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5335
5336 spin_lock_irq(&ndlp->lock);
5337 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5338 spin_unlock_irq(&ndlp->lock);
5339 } else {
5340 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5341 "6456 Skip Setup RSCN Node x%x "
5342 "Data:x%x x%x x%x\n",
5343 ndlp->nlp_DID, ndlp->nlp_flag,
5344 ndlp->nlp_state, vport->fc_flag);
5345 ndlp = NULL;
5346 }
5347 } else {
5348 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5349 "6457 Setup Active Node 2B_DISC x%x "
5350 "Data:x%x x%x x%x\n",
5351 ndlp->nlp_DID, ndlp->nlp_flag,
5352 ndlp->nlp_state, vport->fc_flag);
5353
5354
5355
5356
5357
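		/* If the initiator received a PLOGI from this NPort or if the
		 * initiator is already in the process of discovery on it,
		 * there's no need to try to discover it again.
		 */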
5358 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
5359 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5360 (!vport->phba->nvmet_support &&
5361 ndlp->nlp_flag & NLP_RCV_PLOGI))
5362 return NULL;
5363
5364 if (vport->phba->nvmet_support)
5365 return ndlp;
5366
5367
5368
5369
5370 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5371
5372 spin_lock_irq(&ndlp->lock);
5373 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
5374 spin_unlock_irq(&ndlp->lock);
5375 }
5376 return ndlp;
5377}
5378
5379
5380void
5381lpfc_disc_list_loopmap(struct lpfc_vport *vport)
5382{
5383 struct lpfc_hba *phba = vport->phba;
5384 int j;
5385 uint32_t alpa, index;
5386
5387 if (!lpfc_is_link_up(phba))
5388 return;
5389
5390 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
5391 return;
5392
5393
5394 if (phba->alpa_map[0]) {
5395 for (j = 1; j <= phba->alpa_map[0]; j++) {
5396 alpa = phba->alpa_map[j];
5397 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
5398 continue;
5399 lpfc_setup_disc_node(vport, alpa);
5400 }
5401 } else {
5402
5403 for (j = 0; j < FC_MAXLOOP; j++) {
5404
5405
5406
5407 if (vport->cfg_scan_down)
5408 index = j;
5409 else
5410 index = FC_MAXLOOP - j - 1;
5411 alpa = lpfcAlpaArray[index];
5412 if ((vport->fc_myDID & 0xff) == alpa)
5413 continue;
5414 lpfc_setup_disc_node(vport, alpa);
5415 }
5416 }
5417 return;
5418}
5419
5420
5421void
5422lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
5423{
5424 LPFC_MBOXQ_t *mbox;
5425 struct lpfc_sli *psli = &phba->sli;
5426 struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
5427 struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING];
5428 int rc;
5429
5430
5431
5432
5433
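	/*
	 * If it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */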
5434 if ((phba->link_state >= LPFC_CLEAR_LA) ||
5435 (vport->port_type != LPFC_PHYSICAL_PORT) ||
5436 (phba->sli_rev == LPFC_SLI_REV4))
5437 return;
5438
5439
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
5441 phba->link_state = LPFC_CLEAR_LA;
5442 lpfc_clear_la(phba, mbox);
5443 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
5444 mbox->vport = vport;
5445 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5446 if (rc == MBX_NOT_FINISHED) {
5447 mempool_free(mbox, phba->mbox_mem_pool);
5448 lpfc_disc_flush_list(vport);
5449 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5450 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
5451 phba->link_state = LPFC_HBA_ERROR;
5452 }
5453 }
5454}
5455
5456
5457void
5458lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
5459{
5460 LPFC_MBOXQ_t *regvpimbox;
5461
5462 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5463 if (regvpimbox) {
5464 lpfc_reg_vpi(vport, regvpimbox);
5465 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
5466 regvpimbox->vport = vport;
5467 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
5468 == MBX_NOT_FINISHED) {
5469 mempool_free(regvpimbox, phba->mbox_mem_pool);
5470 }
5471 }
5472}
5473
5474
5475void
5476lpfc_disc_start(struct lpfc_vport *vport)
5477{
5478 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5479 struct lpfc_hba *phba = vport->phba;
5480 uint32_t num_sent;
5481 uint32_t clear_la_pending;
5482
5483 if (!lpfc_is_link_up(phba)) {
5484 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
5485 "3315 Link is not up %x\n",
5486 phba->link_state);
5487 return;
5488 }
5489
5490 if (phba->link_state == LPFC_CLEAR_LA)
5491 clear_la_pending = 1;
5492 else
5493 clear_la_pending = 0;
5494
5495 if (vport->port_state < LPFC_VPORT_READY)
5496 vport->port_state = LPFC_DISC_AUTH;
5497
5498 lpfc_set_disctmo(vport);
5499
5500 vport->fc_prevDID = vport->fc_myDID;
5501 vport->num_disc_nodes = 0;
5502
5503
5504 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5505 "0202 Start Discovery port state x%x "
5506 "flg x%x Data: x%x x%x x%x\n",
5507 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
5508 vport->fc_adisc_cnt, vport->fc_npr_cnt);
5509
5510
5511 num_sent = lpfc_els_disc_adisc(vport);
5512
5513 if (num_sent)
5514 return;
5515
5516
5517 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5518 !(vport->fc_flag & FC_PT2PT) &&
5519 !(vport->fc_flag & FC_RSCN_MODE) &&
5520 (phba->sli_rev < LPFC_SLI_REV4)) {
5521 lpfc_issue_clear_la(phba, vport);
5522 lpfc_issue_reg_vpi(phba, vport);
5523 return;
5524 }
5525
5526
5527
5528
5529
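	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */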
5530 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
5531
5532 lpfc_issue_clear_la(phba, vport);
5533
5534 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
5535 vport->num_disc_nodes = 0;
5536
5537 if (vport->fc_npr_cnt)
5538 lpfc_els_disc_plogi(vport);
5539
5540 if (!vport->num_disc_nodes) {
5541 spin_lock_irq(shost->host_lock);
5542 vport->fc_flag &= ~FC_NDISC_ACTIVE;
5543 spin_unlock_irq(shost->host_lock);
5544 lpfc_can_disctmo(vport);
5545 }
5546 }
5547 vport->port_state = LPFC_VPORT_READY;
5548 } else {
5549
5550 num_sent = lpfc_els_disc_plogi(vport);
5551
5552 if (num_sent)
5553 return;
5554
5555 if (vport->fc_flag & FC_RSCN_MODE) {
5556
5557
5558
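			/*
			 * Check to see if more RSCNs came in while we
			 * were processing this one.
			 */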
5559 if ((vport->fc_rscn_id_cnt == 0) &&
5560 (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
5561 spin_lock_irq(shost->host_lock);
5562 vport->fc_flag &= ~FC_RSCN_MODE;
5563 spin_unlock_irq(shost->host_lock);
5564 lpfc_can_disctmo(vport);
5565 } else
5566 lpfc_els_handle_rscn(vport);
5567 }
5568 }
5569 return;
5570}
5571
/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
 * ring that match the specified nodelist.
 */
5576static void
5577lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
5578{
5579 LIST_HEAD(completions);
5580 IOCB_t *icmd;
5581 struct lpfc_iocbq *iocb, *next_iocb;
5582 struct lpfc_sli_ring *pring;
5583
5584 pring = lpfc_phba_elsring(phba);
5585 if (unlikely(!pring))
5586 return;
5587
5588
5589
5590
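	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */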
5591 spin_lock_irq(&phba->hbalock);
5592 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
5593 if (iocb->context1 != ndlp) {
5594 continue;
5595 }
5596 icmd = &iocb->iocb;
5597 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
5598 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
5599
5600 list_move_tail(&iocb->list, &completions);
5601 }
5602 }
5603
5604
5605 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
5606 if (iocb->context1 != ndlp) {
5607 continue;
5608 }
5609 icmd = &iocb->iocb;
5610 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
5611 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
5612 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
5613 }
5614 }
5615 spin_unlock_irq(&phba->hbalock);
5616
5617
5618 lpfc_issue_hb_tmo(phba);
5619
5620
5621 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5622 IOERR_SLI_ABORTED);
5623}
5624
5625static void
5626lpfc_disc_flush_list(struct lpfc_vport *vport)
5627{
5628 struct lpfc_nodelist *ndlp, *next_ndlp;
5629 struct lpfc_hba *phba = vport->phba;
5630
5631 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
5632 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5633 nlp_listp) {
5634 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
5635 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
5636 lpfc_free_tx(phba, ndlp);
5637 }
5638 }
5639 }
5640}
5641
5642void
5643lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
5644{
5645 lpfc_els_flush_rscn(vport);
5646 lpfc_els_flush_cmd(vport);
5647 lpfc_disc_flush_list(vport);
5648}
5649
/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
5665void
5666lpfc_disc_timeout(struct timer_list *t)
5667{
5668 struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
5669 struct lpfc_hba *phba = vport->phba;
5670 uint32_t tmo_posted;
5671 unsigned long flags = 0;
5672
5673 if (unlikely(!phba))
5674 return;
5675
5676 spin_lock_irqsave(&vport->work_port_lock, flags);
5677 tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
5678 if (!tmo_posted)
5679 vport->work_port_events |= WORKER_DISC_TMO;
5680 spin_unlock_irqrestore(&vport->work_port_lock, flags);
5681
5682 if (!tmo_posted)
5683 lpfc_worker_wake_up(phba);
5684 return;
5685}
5686
5687static void
5688lpfc_disc_timeout_handler(struct lpfc_vport *vport)
5689{
5690 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5691 struct lpfc_hba *phba = vport->phba;
5692 struct lpfc_sli *psli = &phba->sli;
5693 struct lpfc_nodelist *ndlp, *next_ndlp;
5694 LPFC_MBOXQ_t *initlinkmbox;
5695 int rc, clrlaerr = 0;
5696
5697 if (!(vport->fc_flag & FC_DISC_TMO))
5698 return;
5699
5700 spin_lock_irq(shost->host_lock);
5701 vport->fc_flag &= ~FC_DISC_TMO;
5702 spin_unlock_irq(shost->host_lock);
5703
5704 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5705 "disc timeout: state:x%x rtry:x%x flg:x%x",
5706 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5707
5708 switch (vport->port_state) {
5709
5710 case LPFC_LOCAL_CFG_LINK:
5711
5712
5713
5714
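		/*
		 * port_state is identically LPFC_LOCAL_CFG_LINK while
		 * waiting for FAN timeout.
		 */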
5715 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
5716 "0221 FAN timeout\n");
5717
5718
5719 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5720 nlp_listp) {
5721 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
5722 continue;
5723 if (ndlp->nlp_type & NLP_FABRIC) {
5724
5725 lpfc_drop_node(vport, ndlp);
5726
5727 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
5728
5729
5730
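			/* Fail outstanding IO now since this device is
			 * marked for PLOGI.
			 */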
5731 lpfc_unreg_rpi(vport, ndlp);
5732 }
5733 }
5734 if (vport->port_state != LPFC_FLOGI) {
5735 if (phba->sli_rev <= LPFC_SLI_REV3)
5736 lpfc_initial_flogi(vport);
5737 else
5738 lpfc_issue_init_vfi(vport);
5739 return;
5740 }
5741 break;
5742
5743 case LPFC_FDISC:
5744 case LPFC_FLOGI:
5745
5746
5747 lpfc_printf_vlog(vport, KERN_ERR,
5748 LOG_TRACE_EVENT,
5749 "0222 Initial %s timeout\n",
5750 vport->vpi ? "FDISC" : "FLOGI");
5751
5752
5753
5754
5755
5756
5757 lpfc_disc_list_loopmap(vport);
5758
5759
5760 lpfc_disc_start(vport);
5761 break;
5762
5763 case LPFC_FABRIC_CFG_LINK:
5764
5765
5766 lpfc_printf_vlog(vport, KERN_ERR,
5767 LOG_TRACE_EVENT,
5768 "0223 Timeout while waiting for "
5769 "NameServer login\n");
5770
5771 ndlp = lpfc_findnode_did(vport, NameServer_DID);
5772 if (ndlp)
5773 lpfc_els_abort(phba, ndlp);
5774
5775
5776 goto restart_disc;
5777
5778 case LPFC_NS_QRY:
5779
5780 lpfc_printf_vlog(vport, KERN_ERR,
5781 LOG_TRACE_EVENT,
5782 "0224 NameServer Query timeout "
5783 "Data: x%x x%x\n",
5784 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5785
5786 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
5787
5788 vport->fc_ns_retry++;
5789 vport->gidft_inp = 0;
5790 rc = lpfc_issue_gidft(vport);
5791 if (rc == 0)
5792 break;
5793 }
5794 vport->fc_ns_retry = 0;
5795
5796restart_disc:
5801
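		/*
		 * Discovery is over.
		 * set port_state to PORT_READY if SLI2.
		 * cmpl_reg_vpi will set port_state to READY for SLI3.
		 */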
5802 if (phba->sli_rev < LPFC_SLI_REV4) {
5803 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5804 lpfc_issue_reg_vpi(phba, vport);
5805 else {
5806 lpfc_issue_clear_la(phba, vport);
5807 vport->port_state = LPFC_VPORT_READY;
5808 }
5809 }
5810
5811
5812 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5813 if (!initlinkmbox) {
5814 lpfc_printf_vlog(vport, KERN_ERR,
5815 LOG_TRACE_EVENT,
5816 "0206 Device Discovery "
5817 "completion error\n");
5818 phba->link_state = LPFC_HBA_ERROR;
5819 break;
5820 }
5821
5822 lpfc_linkdown(phba);
5823 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
5824 phba->cfg_link_speed);
5825 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
5826 initlinkmbox->vport = vport;
5827 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5828 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
5829 lpfc_set_loopback_flag(phba);
5830 if (rc == MBX_NOT_FINISHED)
5831 mempool_free(initlinkmbox, phba->mbox_mem_pool);
5832
5833 break;
5834
5835 case LPFC_DISC_AUTH:
5836
5837 lpfc_printf_vlog(vport, KERN_ERR,
5838 LOG_TRACE_EVENT,
5839 "0227 Node Authentication timeout\n");
5840 lpfc_disc_flush_list(vport);
5841
5842
5843
5844
5845
5846 if (phba->sli_rev < LPFC_SLI_REV4) {
5847 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5848 lpfc_issue_reg_vpi(phba, vport);
5849 else {
5850 lpfc_issue_clear_la(phba, vport);
5851 vport->port_state = LPFC_VPORT_READY;
5852 }
5853 }
5854 break;
5855
5856 case LPFC_VPORT_READY:
5857 if (vport->fc_flag & FC_RSCN_MODE) {
5858 lpfc_printf_vlog(vport, KERN_ERR,
5859 LOG_TRACE_EVENT,
5860 "0231 RSCN timeout Data: x%x "
5861 "x%x\n",
5862 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5863
5864
5865 lpfc_els_flush_cmd(vport);
5866
5867 lpfc_els_flush_rscn(vport);
5868 lpfc_disc_flush_list(vport);
5869 }
5870 break;
5871
5872 default:
5873 lpfc_printf_vlog(vport, KERN_ERR,
5874 LOG_TRACE_EVENT,
5875 "0273 Unexpected discovery timeout, "
5876 "vport State x%x\n", vport->port_state);
5877 break;
5878 }
5879
5880 switch (phba->link_state) {
5881 case LPFC_CLEAR_LA:
5882
5883 lpfc_printf_vlog(vport, KERN_ERR,
5884 LOG_TRACE_EVENT,
5885 "0228 CLEAR LA timeout\n");
5886 clrlaerr = 1;
5887 break;
5888
5889 case LPFC_LINK_UP:
5890 lpfc_issue_clear_la(phba, vport);
5891 fallthrough;
5892 case LPFC_LINK_UNKNOWN:
5893 case LPFC_WARM_START:
5894 case LPFC_INIT_START:
5895 case LPFC_INIT_MBX_CMDS:
5896 case LPFC_LINK_DOWN:
5897 case LPFC_HBA_ERROR:
5898 lpfc_printf_vlog(vport, KERN_ERR,
5899 LOG_TRACE_EVENT,
5900 "0230 Unexpected timeout, hba link "
5901 "state x%x\n", phba->link_state);
5902 clrlaerr = 1;
5903 break;
5904
5905 case LPFC_HBA_READY:
5906 break;
5907 }
5908
5909 if (clrlaerr) {
5910 lpfc_disc_flush_list(vport);
5911 if (phba->sli_rev != LPFC_SLI_REV4) {
5912 psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
5913 ~LPFC_STOP_IOCB_EVENT;
5914 psli->sli3_ring[LPFC_FCP_RING].flag &=
5915 ~LPFC_STOP_IOCB_EVENT;
5916 }
5917 vport->port_state = LPFC_VPORT_READY;
5918 }
5919 return;
5920}
5921
/*
 * This routine handles processing an FDMI rport's REG_LOGIN mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
5928void
5929lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5930{
5931 MAILBOX_t *mb = &pmb->u.mb;
5932 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
5933 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
5934 struct lpfc_vport *vport = pmb->vport;
5935
5936 pmb->ctx_buf = NULL;
5937 pmb->ctx_ndlp = NULL;
5938
5939 if (phba->sli_rev < LPFC_SLI_REV4)
5940 ndlp->nlp_rpi = mb->un.varWords[0];
5941 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
5942 ndlp->nlp_type |= NLP_FABRIC;
5943 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
5944 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
5945 "0004 rpi:%x DID:%x flg:%x %d x%px\n",
5946 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
5947 kref_read(&ndlp->kref),
5948 ndlp);
5954
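	/*
	 * Start issuing Fabric-Device Management Interface (FDMI) command to
	 * 0xfffffa (FDMI well known port).
	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
	 * DPRT -> RPRT (vports)
	 */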
5955 if (vport->port_type == LPFC_PHYSICAL_PORT) {
5956 phba->link_flag &= ~LS_CT_VEN_RPA;
5957 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
5958 } else {
5959 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
5960 }
5961
5962
5963
5964
5965
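	/* Decrement the node reference count held for this callback
	 * function.
	 */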
5966 lpfc_nlp_put(ndlp);
5967 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5968 kfree(mp);
5969 mempool_free(pmb, phba->mbox_mem_pool);
5970
5971 return;
5972}
5973
5974static int
5975lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
5976{
5977 uint16_t *rpi = param;
5978
5979 return ndlp->nlp_rpi == *rpi;
5980}
5981
5982static int
5983lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
5984{
5985 return memcmp(&ndlp->nlp_portname, param,
5986 sizeof(ndlp->nlp_portname)) == 0;
5987}
5988
5989static struct lpfc_nodelist *
5990__lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
5991{
5992 struct lpfc_nodelist *ndlp;
5993
5994 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5995 if (filter(ndlp, param)) {
5996 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5997 "3185 FIND node filter %ps DID "
5998 "ndlp x%px did x%x flg x%x st x%x "
5999 "xri x%x type x%x rpi x%x\n",
6000 filter, ndlp, ndlp->nlp_DID,
6001 ndlp->nlp_flag, ndlp->nlp_state,
6002 ndlp->nlp_xri, ndlp->nlp_type,
6003 ndlp->nlp_rpi);
6004 return ndlp;
6005 }
6006 }
6007 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6008 "3186 FIND node filter %ps NOT FOUND.\n", filter);
6009 return NULL;
6010}
6011
/*
 * This routine looks up the ndlp lists for the given RPI. If rpi found it
 * returns the node list element pointer else return NULL.
 */
6016struct lpfc_nodelist *
6017__lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6018{
6019 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
6020}
6021
/*
 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it
 * returns the node element list pointer else return NULL.
 */
6026struct lpfc_nodelist *
6027lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
6028{
6029 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6030 struct lpfc_nodelist *ndlp;
6031
6032 spin_lock_irq(shost->host_lock);
6033 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
6034 spin_unlock_irq(shost->host_lock);
6035 return ndlp;
6036}
6037
/*
 * This routine looks up the ndlp lists for the given RPI. If the rpi
 * is found, the routine returns the node element list pointer else
 * return NULL.
 */
6043struct lpfc_nodelist *
6044lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
6045{
6046 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6047 struct lpfc_nodelist *ndlp;
6048 unsigned long flags;
6049
6050 spin_lock_irqsave(shost->host_lock, flags);
6051 ndlp = __lpfc_findnode_rpi(vport, rpi);
6052 spin_unlock_irqrestore(shost->host_lock, flags);
6053 return ndlp;
6054}
6055
/**
 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
 * @phba: pointer to lpfc hba data structure.
 * @vpi: the physical host virtual N_Port identifier.
 *
 * This routine finds a vport on a HBA (referred by @phba) through a
 * @vpi. The function walks the HBA's vport list and returns the address
 * of the vport with the matching @vpi.
 *
 * Return code
 *    NULL - No vport with the matching @vpi found
 *    otherwise - Address for the vport with the matching @vpi.
 **/
6069struct lpfc_vport *
6070lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
6071{
6072 struct lpfc_vport *vport;
6073 unsigned long flags;
6074 int i = 0;
6075
6076
6077 if (vpi > 0) {
6078
6079
6080
6081
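		/*
		 * Translate the physical vpi to the logical vpi. The
		 * vport stores the logical vpi.
		 */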
6082 for (i = 0; i <= phba->max_vpi; i++) {
6083 if (vpi == phba->vpi_ids[i])
6084 break;
6085 }
6086
6087 if (i > phba->max_vpi) {
6088 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6089 "2936 Could not find Vport mapped "
6090 "to vpi %d\n", vpi);
6091 return NULL;
6092 }
6093 }
6094
6095 spin_lock_irqsave(&phba->port_list_lock, flags);
6096 list_for_each_entry(vport, &phba->port_list, listentry) {
6097 if (vport->vpi == i) {
6098 spin_unlock_irqrestore(&phba->port_list_lock, flags);
6099 return vport;
6100 }
6101 }
6102 spin_unlock_irqrestore(&phba->port_list_lock, flags);
6103 return NULL;
6104}
6105
6106struct lpfc_nodelist *
6107lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
6108{
6109 struct lpfc_nodelist *ndlp;
6110 int rpi = LPFC_RPI_ALLOC_ERROR;
6111
6112 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
6113 rpi = lpfc_sli4_alloc_rpi(vport->phba);
6114 if (rpi == LPFC_RPI_ALLOC_ERROR)
6115 return NULL;
6116 }
6117
6118 ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
6119 if (!ndlp) {
6120 if (vport->phba->sli_rev == LPFC_SLI_REV4)
6121 lpfc_sli4_free_rpi(vport->phba, rpi);
6122 return NULL;
6123 }
6124
	memset(ndlp, 0, sizeof(struct lpfc_nodelist));
6126
6127 spin_lock_init(&ndlp->lock);
6128
6129 lpfc_initialize_node(vport, ndlp, did);
6130 INIT_LIST_HEAD(&ndlp->nlp_listp);
6131 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
6132 ndlp->nlp_rpi = rpi;
6133 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
6134 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
6135 "flg:x%x refcnt:%d\n",
6136 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
6137 ndlp->nlp_flag, kref_read(&ndlp->kref));
6138
6139 ndlp->active_rrqs_xri_bitmap =
6140 mempool_alloc(vport->phba->active_rrq_pool,
6141 GFP_KERNEL);
6142 if (ndlp->active_rrqs_xri_bitmap)
6143 memset(ndlp->active_rrqs_xri_bitmap, 0,
6144 ndlp->phba->cfg_rrq_xri_bitmap_sz);
6145 }
6146
6147
6148
6149 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
6150 "node init: did:x%x",
6151 ndlp->nlp_DID, 0, 0);
6152
6153 return ndlp;
6154}
6155
/* This routine releases all resources associated with a specific NPort's ndlp
 * and mempool_free's the nodelist.
 */
6159static void
6160lpfc_nlp_release(struct kref *kref)
6161{
6162 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
6163 kref);
6164 struct lpfc_vport *vport = ndlp->vport;
6165
6166 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6167 "node release: did:x%x flg:x%x type:x%x",
6168 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
6169
6170 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
6171 "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n",
6172 __func__, ndlp, ndlp->nlp_DID,
6173 kref_read(&ndlp->kref), ndlp->nlp_rpi);
6174
6175
6176 lpfc_cancel_retry_delay_tmo(vport, ndlp);
6177 lpfc_cleanup_node(vport, ndlp);
6178
6179
6180
6181
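	/* Clear Node key fields to give other threads notice
	 * that this node memory is not valid anymore.
	 */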
6182 ndlp->vport = NULL;
6183 ndlp->nlp_state = NLP_STE_FREED_NODE;
6184 ndlp->nlp_flag = 0;
6185 ndlp->fc4_xpt_flags = 0;
6186
6187
6188 kfree(ndlp->lat_data);
6189 if (ndlp->phba->sli_rev == LPFC_SLI_REV4)
6190 mempool_free(ndlp->active_rrqs_xri_bitmap,
6191 ndlp->phba->active_rrq_pool);
6192 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
6193}
6194
/* This routine bumps the reference count for a ndlp structure to ensure
 * that one discovery thread won't free a ndlp while another discovery thread
 * is using it.
 */
6199struct lpfc_nodelist *
6200lpfc_nlp_get(struct lpfc_nodelist *ndlp)
6201{
6202 unsigned long flags;
6203
6204 if (ndlp) {
6205 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6206 "node get: did:x%x flg:x%x refcnt:x%x",
6207 ndlp->nlp_DID, ndlp->nlp_flag,
6208 kref_read(&ndlp->kref));
6209
6210
6211
6212
6213
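		/* The check of ndlp usage to prevent incrementing the
		 * ndlp reference count that is in the process of being
		 * released.
		 */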
6214 spin_lock_irqsave(&ndlp->lock, flags);
6215 if (!kref_get_unless_zero(&ndlp->kref)) {
6216 spin_unlock_irqrestore(&ndlp->lock, flags);
6217 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
6218 "0276 %s: ndlp:x%px refcnt:%d\n",
6219 __func__, (void *)ndlp, kref_read(&ndlp->kref));
6220 return NULL;
6221 }
6222 spin_unlock_irqrestore(&ndlp->lock, flags);
6223 } else {
6224 WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__);
6225 }
6226
6227 return ndlp;
6228}
6229
/* This routine decrements the reference count for a ndlp structure. If the
 * count goes to 0, this indicates the associated nodelist should be freed.
 */
6233int
6234lpfc_nlp_put(struct lpfc_nodelist *ndlp)
6235{
6236 if (ndlp) {
6237 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6238 "node put: did:x%x flg:x%x refcnt:x%x",
6239 ndlp->nlp_DID, ndlp->nlp_flag,
6240 kref_read(&ndlp->kref));
6241 } else {
6242 WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__);
6243 }
6244
6245 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0;
6246}
6247
/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. It returns 1 if the ndlp has been
 * freed. A return value of 0 indicates the ndlp has not yet been
 * released.
 */
6253int
6254lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
6255{
6256 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
6257 "node not used: did:x%x flg:x%x refcnt:x%x",
6258 ndlp->nlp_DID, ndlp->nlp_flag,
6259 kref_read(&ndlp->kref));
6260 if (kref_read(&ndlp->kref) == 1)
6261 if (lpfc_nlp_put(ndlp))
6262 return 1;
6263 return 0;
6264}
6265
/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with
 * fc_rports associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
6276static int
6277lpfc_fcf_inuse(struct lpfc_hba *phba)
6278{
6279 struct lpfc_vport **vports;
6280 int i, ret = 0;
6281 struct lpfc_nodelist *ndlp;
6282 struct Scsi_Host *shost;
6283
6284 vports = lpfc_create_vport_work_array(phba);
6285
6286
6287 if (!vports)
6288 return 1;
6289
6290 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6291 shost = lpfc_shost_from_vport(vports[i]);
6292 spin_lock_irq(shost->host_lock);
6298
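		/*
		 * If the CVL_RCVD bit is not set then we have sent the
		 * flogi.
		 * If dev_loss fires while we are waiting we do not want to
		 * unreg the fcf.
		 */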
6299 if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
6300 spin_unlock_irq(shost->host_lock);
6301 ret = 1;
6302 goto out;
6303 }
6304 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
6305 if (ndlp->rport &&
6306 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
6307 ret = 1;
6308 spin_unlock_irq(shost->host_lock);
6309 goto out;
6310 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
6311 ret = 1;
6312 lpfc_printf_log(phba, KERN_INFO,
6313 LOG_NODE | LOG_DISCOVERY,
6314 "2624 RPI %x DID %x flag %x "
6315 "still logged in\n",
6316 ndlp->nlp_rpi, ndlp->nlp_DID,
6317 ndlp->nlp_flag);
6318 }
6319 }
6320 spin_unlock_irq(shost->host_lock);
6321 }
6322out:
6323 lpfc_destroy_vport_work_array(phba, vports);
6324 return ret;
6325}
6326
/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
6334void
6335lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6336{
6337 struct lpfc_vport *vport = mboxq->vport;
6338 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6339
6340 if (mboxq->u.mb.mbxStatus) {
6341 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6342 "2555 UNREG_VFI mbxStatus error x%x "
6343 "HBA state x%x\n",
6344 mboxq->u.mb.mbxStatus, vport->port_state);
6345 }
6346 spin_lock_irq(shost->host_lock);
6347 phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
6348 spin_unlock_irq(shost->host_lock);
6349 mempool_free(mboxq, phba->mbox_mem_pool);
6350 return;
6351}
6352
/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
6360static void
6361lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6362{
6363 struct lpfc_vport *vport = mboxq->vport;
6364
6365 if (mboxq->u.mb.mbxStatus) {
6366 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6367 "2550 UNREG_FCFI mbxStatus error x%x "
6368 "HBA state x%x\n",
6369 mboxq->u.mb.mbxStatus, vport->port_state);
6370 }
6371 mempool_free(mboxq, phba->mbox_mem_pool);
6372 return;
6373}
6374
/**
 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
 * @phba: Pointer to hba context object.
 *
 * This function prepares the HBA for unregistering the currently registered
 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and
 * VFIs.
 */
6383int
6384lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
6385{
6386 struct lpfc_vport **vports;
6387 struct lpfc_nodelist *ndlp;
6388 struct Scsi_Host *shost;
6389 int i = 0, rc;
6390
6391
6392 if (lpfc_fcf_inuse(phba))
6393 lpfc_unreg_hba_rpis(phba);
6394
6395
6396 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
6397
6398
6399 vports = lpfc_create_vport_work_array(phba);
6400 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
6401 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6402
6403 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6404 if (ndlp)
6405 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6406 lpfc_cleanup_pending_mbox(vports[i]);
6407 if (phba->sli_rev == LPFC_SLI_REV4)
6408 lpfc_sli4_unreg_all_rpis(vports[i]);
6409 lpfc_mbx_unreg_vpi(vports[i]);
6410 shost = lpfc_shost_from_vport(vports[i]);
6411 spin_lock_irq(shost->host_lock);
6412 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6413 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
6414 spin_unlock_irq(shost->host_lock);
6415 }
6416 lpfc_destroy_vport_work_array(phba, vports);
6417 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
6418 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6419 if (ndlp)
6420 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
6421 lpfc_cleanup_pending_mbox(phba->pport);
6422 if (phba->sli_rev == LPFC_SLI_REV4)
6423 lpfc_sli4_unreg_all_rpis(phba->pport);
6424 lpfc_mbx_unreg_vpi(phba->pport);
6425 shost = lpfc_shost_from_vport(phba->pport);
6426 spin_lock_irq(shost->host_lock);
6427 phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6428 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
6429 spin_unlock_irq(shost->host_lock);
6430 }
6431
6432
6433 lpfc_els_flush_all_cmd(phba);
6434
6435
6436 rc = lpfc_issue_unreg_vfi(phba->pport);
6437 return rc;
6438}
6439
/**
 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
 * @phba: Pointer to hba context object.
 *
 * This function issues a synchronous unregister FCF mailbox command to the
 * HBA to unregister the currently registered FCF record. The driver does
 * not reset the driver FCF usage state flags.
 *
 * Return 0 if successfully issued, non-zero otherwise.
 */
6450int
6451lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
6452{
6453 LPFC_MBOXQ_t *mbox;
6454 int rc;
6455
6456 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6457 if (!mbox) {
6458 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2551 UNREG_FCFI mbox allocation failed, "
6460 "HBA state x%x\n", phba->pport->port_state);
6461 return -ENOMEM;
6462 }
6463 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
6464 mbox->vport = phba->pport;
6465 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
6466 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6467
6468 if (rc == MBX_NOT_FINISHED) {
6469 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6470 "2552 Unregister FCFI command failed rc x%x "
6471 "HBA state x%x\n",
6472 rc, phba->pport->port_state);
6473 return -EINVAL;
6474 }
6475 return 0;
6476}
6477
/**
 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF. It also tries to
 * find another FCF for discovery by rescanning the HBA FCF table.
 */
6485void
6486lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
6487{
6488 int rc;
6489
6490
6491 rc = lpfc_unregister_fcf_prep(phba);
6492 if (rc) {
6493 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6494 "2748 Failed to prepare for unregistering "
6495 "HBA's FCF record: rc=%d\n", rc);
6496 return;
6497 }
6498
6499
6500 rc = lpfc_sli4_unregister_fcf(phba);
6501 if (rc)
6502 return;
6503
6504 phba->fcf.fcf_flag = 0;
6505 phba->fcf.current_rec.flag = 0;
6506
6507
6508
6509
6510
6511 if ((phba->pport->load_flag & FC_UNLOADING) ||
6512 (phba->link_state < LPFC_LINK_UP))
6513 return;
6514
6515
6516 spin_lock_irq(&phba->hbalock);
6517 phba->fcf.fcf_flag |= FCF_INIT_DISC;
6518 spin_unlock_irq(&phba->hbalock);
6519
6520
6521 lpfc_sli4_clear_fcf_rr_bmask(phba);
6522
6523 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
6524
6525 if (rc) {
6526 spin_lock_irq(&phba->hbalock);
6527 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
6528 spin_unlock_irq(&phba->hbalock);
6529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6530 "2553 lpfc_unregister_unused_fcf failed "
6531 "to read FCF record HBA state x%x\n",
6532 phba->pport->port_state);
6533 }
6534}
6535
/**
 * lpfc_unregister_fcf - Unregister the currently registered fcf record
 * @phba: Pointer to hba context object.
 *
 * This function unregisters the currently registered FCF and clears the
 * driver's FCF_REGISTERED state. Unlike lpfc_unregister_fcf_rescan(), it
 * does not initiate a new FCF table scan.
 */
6543void
6544lpfc_unregister_fcf(struct lpfc_hba *phba)
6545{
6546 int rc;
6547
6548
6549 rc = lpfc_unregister_fcf_prep(phba);
6550 if (rc) {
6551 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6552 "2749 Failed to prepare for unregistering "
6553 "HBA's FCF record: rc=%d\n", rc);
6554 return;
6555 }
6556
6557
6558 rc = lpfc_sli4_unregister_fcf(phba);
6559 if (rc)
6560 return;
6561
6562 spin_lock_irq(&phba->hbalock);
6563 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
6564 spin_unlock_irq(&phba->hbalock);
6565}
6566
/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks whether there are any connected remote ports for the
 * FCF and, if all the devices are disconnected, unregisters the FCFI.
 * It also tries to use another FCF for discovery.
 */
6575void
6576lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
6582
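	/*
	 * If HBA is not running in FIP mode, if HBA does not support
	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
	 * registered, do nothing.
	 */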
6583 spin_lock_irq(&phba->hbalock);
6584 if (!(phba->hba_flag & HBA_FCOE_MODE) ||
6585 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
6586 !(phba->hba_flag & HBA_FIP_SUPPORT) ||
6587 (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
6588 (phba->pport->port_state == LPFC_FLOGI)) {
6589 spin_unlock_irq(&phba->hbalock);
6590 return;
6591 }
6592 spin_unlock_irq(&phba->hbalock);
6593
6594 if (lpfc_fcf_inuse(phba))
6595 return;
6596
6597 lpfc_unregister_fcf_rescan(phba);
6598}
6599
/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 *        region.
 *
 * This function creates the driver data structures for the FCF connection
 * records and reads the configuration parameters for the FCF records.
 */
6608static void
6609lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
6610 uint8_t *buff)
6611{
6612 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6613 struct lpfc_fcf_conn_hdr *conn_hdr;
6614 struct lpfc_fcf_conn_rec *conn_rec;
6615 uint32_t record_count;
6616 int i;
6617
6618
6619 list_for_each_entry_safe(conn_entry, next_conn_entry,
6620 &phba->fcf_conn_rec_list, list) {
6621 list_del_init(&conn_entry->list);
6622 kfree(conn_entry);
6623 }
6624
6625 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t) /
		       sizeof(struct lpfc_fcf_conn_rec);
6628
6629 conn_rec = (struct lpfc_fcf_conn_rec *)
6630 (buff + sizeof(struct lpfc_fcf_conn_hdr));
6631
6632 for (i = 0; i < record_count; i++) {
6633 if (!(conn_rec[i].flags & FCFCNCT_VALID))
6634 continue;
6635 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
6636 GFP_KERNEL);
6637 if (!conn_entry) {
6638 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6639 "2566 Failed to allocate connection"
6640 " table entry\n");
6641 return;
6642 }
6643
6644 memcpy(&conn_entry->conn_rec, &conn_rec[i],
6645 sizeof(struct lpfc_fcf_conn_rec));
6646 list_add_tail(&conn_entry->list,
6647 &phba->fcf_conn_rec_list);
6648 }
6649
6650 if (!list_empty(&phba->fcf_conn_rec_list)) {
6651 i = 0;
6652 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
6653 list) {
6654 conn_rec = &conn_entry->conn_rec;
6655 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6656 "3345 FCF connection list rec[%02d]: "
6657 "flags:x%04x, vtag:x%04x, "
6658 "fabric_name:x%02x:%02x:%02x:%02x:"
6659 "%02x:%02x:%02x:%02x, "
6660 "switch_name:x%02x:%02x:%02x:%02x:"
6661 "%02x:%02x:%02x:%02x\n", i++,
6662 conn_rec->flags, conn_rec->vlan_tag,
6663 conn_rec->fabric_name[0],
6664 conn_rec->fabric_name[1],
6665 conn_rec->fabric_name[2],
6666 conn_rec->fabric_name[3],
6667 conn_rec->fabric_name[4],
6668 conn_rec->fabric_name[5],
6669 conn_rec->fabric_name[6],
6670 conn_rec->fabric_name[7],
6671 conn_rec->switch_name[0],
6672 conn_rec->switch_name[1],
6673 conn_rec->switch_name[2],
6674 conn_rec->switch_name[3],
6675 conn_rec->switch_name[4],
6676 conn_rec->switch_name[5],
6677 conn_rec->switch_name[6],
6678 conn_rec->switch_name[7]);
6679 }
6680 }
6681}
6682
/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with config
 * parameters read from config region 23.
 */
6691static void
6692lpfc_read_fcoe_param(struct lpfc_hba *phba,
6693 uint8_t *buff)
6694{
6695 struct lpfc_fip_param_hdr *fcoe_param_hdr;
6696 struct lpfc_fcoe_params *fcoe_param;
6697
6698 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
6699 buff;
6700 fcoe_param = (struct lpfc_fcoe_params *)
6701 (buff + sizeof(struct lpfc_fip_param_hdr));
6702
6703 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
6704 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
6705 return;
6706
6707 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
6708 phba->valid_vlan = 1;
6709 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
6710 0xFFF;
6711 }
6712
6713 phba->fc_map[0] = fcoe_param->fc_map[0];
6714 phba->fc_map[1] = fcoe_param->fc_map[1];
6715 phba->fc_map[2] = fcoe_param->fc_map[2];
6716 return;
6717}
6718
/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches config region data to find the beginning
 * of the record specified by record_type. If the record is found, this
 * function returns a pointer to the record; otherwise, it returns NULL.
 */
6729static uint8_t *
6730lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
6731{
6732 uint32_t offset = 0, rec_length;
6733
6734 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
6735 (size < sizeof(uint32_t)))
6736 return NULL;
6737
6738 rec_length = buff[offset + 1];
6739
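	/*
	 * One TLV record has a one-word header and the number of data
	 * words specified in the rec_length field of the record header.
	 */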
6744 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
6745 <= size) {
6746 if (buff[offset] == rec_type)
6747 return &buff[offset];
6748
6749 if (buff[offset] == LPFC_REGION23_LAST_REC)
6750 return NULL;
6751
6752 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
6753 rec_length = buff[offset + 1];
6754 }
6755 return NULL;
6756}
6757
/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * copies the required data into the phba driver parameter structure.
 */
6767void
6768lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
6769 uint8_t *buff,
6770 uint32_t size)
6771{
6772 uint32_t offset = 0;
6773 uint8_t *rec_ptr;
6774
6775
6776
6777
6778
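	/*
	 * If data size is less than 2 words, the signature and version
	 * cannot be verified.
	 */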
	if (size < 2 * sizeof(uint32_t))
6780 return;
6781
6782
6783 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
6784 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6785 "2567 Config region 23 has bad signature\n");
6786 return;
6787 }
6788
6789 offset += 4;
6790
6791
6792 if (buff[offset] != LPFC_REGION23_VERSION) {
6793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6794 "2568 Config region 23 has bad version\n");
6795 return;
6796 }
6797 offset += 4;
6798
6799
6800 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6801 size - offset, FCOE_PARAM_TYPE);
6802 if (rec_ptr)
6803 lpfc_read_fcoe_param(phba, rec_ptr);
6804
6805
6806 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6807 size - offset, FCOE_CONN_TBL_TYPE);
6808 if (rec_ptr)
6809 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
6810
6811}
6812