1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_transport_fc.h>
29#include <scsi/scsi.h>
30#include <scsi/fc/fc_fs.h>
31
32#include "lpfc_hw4.h"
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_sli4.h"
36#include "lpfc_nl.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_compat.h"
43
44
45
46
47
48
49
50
51
52
53
54
55
/**
 * lpfc_dump_static_vport - Build DUMP_MEMORY mbox for static vport info
 * @phba: pointer to the HBA context object.
 * @pmb: mailbox command to prepare.
 * @offset: dump-region entry index to start reading from.
 *
 * Prepares @pmb as an MBX_DUMP_MEMORY command against the static vport
 * config region. Pre-SLI4 HBAs return the data inline in the mailbox;
 * SLI4 needs a separate DMA buffer, which is allocated here and handed
 * to @pmb via context1 (freed by the completion path).
 *
 * Return: 0 on success, 1 if the SLI4 DMA buffer could not be allocated.
 */
int
lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
		uint16_t offset)
{
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;

	mb = &pmb->u.mb;

	/* Clear the whole request before filling in the command fields */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = DMP_REGION_VPORT;
	mb->mbxOwner = OWN_HOST;

	/* Pre-SLI4: response is copied inline, no DMA buffer needed */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		mb->un.varDmp.cv = 1;
		mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
		return 0;
	}

	/* SLI4: allocate a DMA buffer to receive the dump data */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	if (!mp || !mp->virt) {
		/* kfree(NULL) is a no-op, so this covers both failure modes */
		kfree(mp);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"2605 lpfc_dump_static_vport: memory"
			" allocation failed\n");
		return 1;
	}
	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* Ownership of mp transfers to the mailbox completion handler */
	pmb->context1 = (uint8_t *)mp;
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);

	return 0;
}
102
103
104
105
106
107
108
109
110void
111lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
112{
113 MAILBOX_t *mb;
114 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
115 mb = &pmb->u.mb;
116 mb->mbxCommand = MBX_DOWN_LINK;
117 mb->mbxOwner = OWN_HOST;
118}
119
120
121
122
123
124
125
126
127
128
129
130
131
132void
133lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
134 uint16_t region_id)
135{
136 MAILBOX_t *mb;
137 void *ctx;
138
139 mb = &pmb->u.mb;
140 ctx = pmb->context2;
141
142
143 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
144 mb->mbxCommand = MBX_DUMP_MEMORY;
145 mb->un.varDmp.cv = 1;
146 mb->un.varDmp.type = DMP_NV_PARAMS;
147 mb->un.varDmp.entry_index = offset;
148 mb->un.varDmp.region_id = region_id;
149 mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
150 mb->un.varDmp.co = 0;
151 mb->un.varDmp.resp_offset = 0;
152 pmb->context2 = ctx;
153 mb->mbxOwner = OWN_HOST;
154 return;
155}
156
157
158
159
160
161
162
163
164
165void
166lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
167{
168 MAILBOX_t *mb;
169 void *ctx;
170
171 mb = &pmb->u.mb;
172
173 ctx = pmb->context2;
174
175
176 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
177 mb->mbxCommand = MBX_DUMP_MEMORY;
178 mb->mbxOwner = OWN_HOST;
179 mb->un.varDmp.cv = 1;
180 mb->un.varDmp.type = DMP_NV_PARAMS;
181 if (phba->sli_rev < LPFC_SLI_REV4)
182 mb->un.varDmp.entry_index = 0;
183 mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
184 mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
185 mb->un.varDmp.co = 0;
186 mb->un.varDmp.resp_offset = 0;
187 pmb->context2 = ctx;
188 return;
189}
190
191
192
193
194
195
196
197
198
199
200
201
202void
203lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
204{
205 MAILBOX_t *mb;
206
207 mb = &pmb->u.mb;
208 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
209 mb->mbxCommand = MBX_READ_NV;
210 mb->mbxOwner = OWN_HOST;
211 return;
212}
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227void
228lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
229 uint32_t ring)
230{
231 MAILBOX_t *mb;
232
233 mb = &pmb->u.mb;
234 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
235 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
236 mb->un.varCfgAsyncEvent.ring = ring;
237 mb->mbxOwner = OWN_HOST;
238 return;
239}
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254void
255lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
256{
257 MAILBOX_t *mb;
258
259 mb = &pmb->u.mb;
260 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
261 mb->mbxCommand = MBX_HEARTBEAT;
262 mb->mbxOwner = OWN_HOST;
263 return;
264}
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287int
288lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
289 struct lpfc_dmabuf *mp)
290{
291 MAILBOX_t *mb;
292
293 mb = &pmb->u.mb;
294 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
295
296 INIT_LIST_HEAD(&mp->list);
297 mb->mbxCommand = MBX_READ_TOPOLOGY;
298 mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
299 mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
300 mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
301
302
303
304
305 pmb->context1 = (uint8_t *)mp;
306 mb->mbxOwner = OWN_HOST;
307 return (0);
308}
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325void
326lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
327{
328 MAILBOX_t *mb;
329
330 mb = &pmb->u.mb;
331 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
332
333 mb->un.varClearLA.eventTag = phba->fc_eventTag;
334 mb->mbxCommand = MBX_CLEAR_LA;
335 mb->mbxOwner = OWN_HOST;
336 return;
337}
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
/**
 * lpfc_config_link - Prepare a CONFIG_LINK mailbox command
 * @phba: pointer to the HBA context object.
 * @pmb: mailbox command to prepare.
 *
 * Builds an MBX_CONFIG_LINK request carrying the physical port's DID and
 * the FC timeout values (E_D_TOV, R_A_TOV, etc.) currently held in @phba.
 * Coalescing (cr_delay/cr_count) and ACK0 are only configurable on
 * pre-SLI4 hardware.
 */
void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_vport *vport = phba->pport;
	MAILBOX_t *mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* Interrupt coalescing: only valid before SLI4 and only when the
	 * user configured a delay; ci must be set alongside cr.
	 */
	if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
		mb->un.varCfgLnk.cr = 1;
		mb->un.varCfgLnk.ci = 1;
		mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
		mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
	}

	/* Port identity and FC timeout values */
	mb->un.varCfgLnk.myId = vport->fc_myDID;
	mb->un.varCfgLnk.edtov = phba->fc_edtov;
	mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
	mb->un.varCfgLnk.ratov = phba->fc_ratov;
	mb->un.varCfgLnk.rttov = phba->fc_rttov;
	mb->un.varCfgLnk.altov = phba->fc_altov;
	mb->un.varCfgLnk.crtov = phba->fc_crtov;
	mb->un.varCfgLnk.citov = phba->fc_citov;

	/* ACK0 support is likewise a pre-SLI4 option */
	if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
		mb->un.varCfgLnk.ack0_enable = 1;

	mb->mbxCommand = MBX_CONFIG_LINK;
	mb->mbxOwner = OWN_HOST;
	return;
}
386
387
388
389
390
391
392
393
394
395
396
397
398
399
/**
 * lpfc_config_msi - Prepare a CONFIG_MSI mailbox for MSI-X multi-message mode
 * @phba: pointer to the HBA context object.
 * @pmb: mailbox command to prepare.
 *
 * Programs which host-attention conditions are routed to which MSI-X
 * message. Requires the driver to be configured for MSI-X (cfg_use_msi
 * == 2) and an SLI-3 or later HBA.
 *
 * Return: 0 on success, -EINVAL if MSI-X or SLI-3 prerequisites fail.
 */
int
lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t attentionConditions[2];

	/* Multi-message MSI only makes sense in MSI-X mode */
	if (phba->cfg_use_msi != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0475 Not configured for supporting MSI-X "
				"cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
		return -EINVAL;
	}

	if (phba->sli_rev < 3) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0476 HBA not supporting SLI-3 or later "
				"SLI Revision: 0x%x\n", phba->sli_rev);
		return -EINVAL;
	}

	/* Clear the mailbox command */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	/* All ring attentions, error attention, link attention and mailbox
	 * attention participate in MSI message routing.
	 */
	attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
				  HA_LATT | HA_MBATT);
	attentionConditions[1] = 0;

	mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
	mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];

	/* Route the FCP (ring 0) and ELS (ring 1) ring attentions to MSI-X
	 * message 1. The byte index differs by endianness of the bitfields.
	 */
#ifdef __BIG_ENDIAN_BITFIELD
	/* RA0 (FCP Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
	/* RA1 (Other Protocol Extra Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
#else
	/* RA0 (FCP Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
	/* RA1 (Other Protocol Extra Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
#endif
	/* Multi-message attention configuration */
	mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
	mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];

	/* NOTE(review): the writes above are intentionally overridden —
	 * HBA auto-clear is not used reliably, so it is disabled here.
	 */
	mb->un.varCfgMSI.autoClearHA[0] = 0;
	mb->un.varCfgMSI.autoClearHA[1] = 0;

	/* Set mailbox command and owner */
	mb->mbxCommand = MBX_CONFIG_MSI;
	mb->mbxOwner = OWN_HOST;

	return 0;
}
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
/**
 * lpfc_init_link - Prepare an INIT_LINK mailbox command
 * @phba: pointer to the HBA context object.
 * @pmb: mailbox command to prepare.
 * @topology: requested link topology (FLAGS_TOPOLOGY_MODE_* value).
 * @linkspeed: requested link speed (LPFC_USER_LINK_SPEED_* value).
 *
 * Builds an MBX_INIT_LINK request to bring the link up with the given
 * topology and speed. Loop topology is forced to point-to-point on
 * Lancer G6 hardware, which does not support loop. Explicit link speeds
 * are only honored when the firmware feature level supports them;
 * otherwise auto-negotiation is requested.
 */
void
lpfc_init_link(struct lpfc_hba * phba,
	       LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
{
	lpfc_vpd_t *vpd;
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* Translate the requested topology into link flags; the *_PT
	 * variants add failover to the alternate topology.
	 */
	switch (topology) {
	case FLAGS_TOPOLOGY_MODE_LOOP_PT:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
		break;
	case FLAGS_TOPOLOGY_MODE_PT_PT:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		break;
	case FLAGS_TOPOLOGY_MODE_LOOP:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		break;
	case FLAGS_TOPOLOGY_MODE_PT_LOOP:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
		break;
	case FLAGS_LOCAL_LB:
		mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		break;
	}

	/* Lancer G6 does not support loop; force point-to-point */
	if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
	    mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
		/* Failover is not supported either in this case */
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
	}

	/* Enable asynchronous ABTS completions */
	mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;

	/* Explicit link speed requests need firmware feature level >= 2;
	 * anything else (or an unknown speed) falls back to auto.
	 */
	vpd = &phba->vpd;
	if (vpd->rev.feaLevelHigh >= 0x02){
		switch(linkspeed){
		case LPFC_USER_LINK_SPEED_1G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
			break;
		case LPFC_USER_LINK_SPEED_2G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
			break;
		case LPFC_USER_LINK_SPEED_4G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
			break;
		case LPFC_USER_LINK_SPEED_8G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
			break;
		case LPFC_USER_LINK_SPEED_10G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
			break;
		case LPFC_USER_LINK_SPEED_16G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
			break;
		case LPFC_USER_LINK_SPEED_32G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_32G;
			break;
		case LPFC_USER_LINK_SPEED_AUTO:
		default:
			mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
			break;
		}

	}
	else
		mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;

	mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
	mb->mbxOwner = OWN_HOST;
	mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
	return;
}
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
/**
 * lpfc_read_sparam - Prepare a READ_SPARAM64 mailbox command
 * @phba: pointer to the HBA context object.
 * @pmb: mailbox command to prepare.
 * @vpi: virtual port index whose service parameters are requested.
 *
 * Allocates a DMA buffer for the service parameters and builds an
 * MBX_READ_SPARM64 request pointing at it. On success the buffer is
 * handed to @pmb via context1 and must be freed by the completion path.
 *
 * Return: 0 on success, 1 if the DMA buffer could not be allocated
 * (the mailbox command field is still set so callers can log/clean up).
 */
int
lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
	struct lpfc_dmabuf *mp;
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->mbxOwner = OWN_HOST;

	/* Get a buffer to hold the HBAs Service Parameters */

	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		/* kfree(NULL) is a no-op, so this covers both failure modes */
		kfree(mp);
		mb->mbxCommand = MBX_READ_SPARM64;
		/* READ_SPARAM: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
			        "0301 READ_SPARAM: no buffers\n");
		return (1);
	}
	INIT_LIST_HEAD(&mp->list);
	mb->mbxCommand = MBX_READ_SPARM64;
	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
	mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];

	/* save address for completion */
	pmb->context1 = mp;

	return (0);
}
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643void
644lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
645 LPFC_MBOXQ_t * pmb)
646{
647 MAILBOX_t *mb;
648
649 mb = &pmb->u.mb;
650 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
651
652 mb->un.varUnregDID.did = did;
653 mb->un.varUnregDID.vpi = vpi;
654 if ((vpi != 0xffff) &&
655 (phba->sli_rev == LPFC_SLI_REV4))
656 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
657
658 mb->mbxCommand = MBX_UNREG_D_ID;
659 mb->mbxOwner = OWN_HOST;
660 return;
661}
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676void
677lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
678{
679 MAILBOX_t *mb;
680
681 mb = &pmb->u.mb;
682 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
683
684 mb->mbxCommand = MBX_READ_CONFIG;
685 mb->mbxOwner = OWN_HOST;
686 return;
687}
688
689
690
691
692
693
694
695
696
697
698
699
700
701void
702lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
703{
704 MAILBOX_t *mb;
705
706 mb = &pmb->u.mb;
707 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
708
709 mb->mbxCommand = MBX_READ_LNK_STAT;
710 mb->mbxOwner = OWN_HOST;
711 return;
712}
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
/**
 * lpfc_reg_rpi - Prepare a REG_LOGIN64 mailbox command
 * @phba: pointer to the HBA context object.
 * @vpi: virtual port index for the login.
 * @did: remote port FC destination id.
 * @param: pointer to the remote port's service parameters (serv_parm).
 * @pmb: mailbox command to prepare.
 * @rpi: remote port index (only used to look up the SLI4 firmware rpi).
 *
 * Copies @param into a freshly allocated DMA buffer and builds an
 * MBX_REG_LOGIN64 request pointing at it. The buffer is handed to @pmb
 * via context1 and must be freed by the completion path.
 *
 * Return: 0 on success, 1 if the DMA buffer could not be allocated.
 */
int
lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint8_t *sparam;
	struct lpfc_dmabuf *mp;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* SLI4 uses the firmware-assigned rpi; earlier revs leave it 0 */
	mb->un.varRegLogin.rpi = 0;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
	mb->un.varRegLogin.did = did;
	mb->mbxOwner = OWN_HOST;
	/* Get a buffer to hold NPorts Service Parameters */
	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		/* kfree(NULL) is a no-op, so this covers both failure modes */
		kfree(mp);
		mb->mbxCommand = MBX_REG_LOGIN64;
		/* REG_LOGIN: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
				"rpi x%x\n", vpi, did, rpi);
		return 1;
	}
	INIT_LIST_HEAD(&mp->list);
	sparam = mp->virt;

	/* Copy param's into a new buffer */
	memcpy(sparam, param, sizeof (struct serv_parm));

	/* save address for completion */
	pmb->context1 = (uint8_t *) mp;

	mb->mbxCommand = MBX_REG_LOGIN64;
	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
	mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);

	return 0;
}
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802void
803lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
804 LPFC_MBOXQ_t * pmb)
805{
806 MAILBOX_t *mb;
807
808 mb = &pmb->u.mb;
809 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
810
811 mb->un.varUnregLogin.rpi = rpi;
812 mb->un.varUnregLogin.rsvd1 = 0;
813 if (phba->sli_rev >= LPFC_SLI_REV3)
814 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
815
816 mb->mbxCommand = MBX_UNREG_LOGIN;
817 mb->mbxOwner = OWN_HOST;
818
819 return;
820}
821
822
823
824
825
826
827
828
/**
 * lpfc_sli4_unreg_all_rpis - Unregister all RPIs of a vport (SLI4)
 * @vport: vport whose remote-port logins are to be unregistered.
 *
 * Allocates a mailbox from the pool, builds an UNREG_LOGIN that targets
 * every rpi on the vport, and issues it without waiting. If the mailbox
 * cannot be allocated the request is silently skipped (best effort); if
 * issuing fails the mailbox is returned to the pool.
 */
void
lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		/* For SLI4 the rpi field of UNREG_LOGIN carries the vpi
		 * when unregistering by vport, hence the vpi_ids lookup
		 * passed as the rpi argument here.
		 */
		lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
				 mbox);
		/* NOTE(review): 0x4000 in rsvd1 appears to flag
		 * "unreg all rpis on this vpi" to the firmware — confirm
		 * against the SLI4 spec before changing.
		 */
		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
}
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
/**
 * lpfc_reg_vpi - Prepare a REG_VPI mailbox command
 * @vport: vport to register with the firmware.
 * @pmb: mailbox command to prepare.
 *
 * Builds an MBX_REG_VPI request carrying the vport's DID, vfi and WWPN.
 * On SLI4, re-registering an already-registered vpi sets the "update"
 * bit so the firmware refreshes rather than allocates.
 */
void
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_hba *phba = vport->phba;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	/* Set the re-reg VPI bit for UC1 mode registration and
	 * greater than UC1 mode re-registration (the vpi is already
	 * known to the firmware in that case).
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
		mb->un.varRegVpi.upd = 1;

	mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
	mb->un.varRegVpi.sid = vport->fc_myDID;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
	else
		mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
	memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
	       sizeof(struct lpfc_name));
	/* Firmware expects the WWN words in little-endian order */
	mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
	mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);

	mb->mbxCommand = MBX_REG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;

}
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919void
920lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
921{
922 MAILBOX_t *mb = &pmb->u.mb;
923 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
924
925 if (phba->sli_rev == LPFC_SLI_REV3)
926 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
927 else if (phba->sli_rev >= LPFC_SLI_REV4)
928 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
929
930 mb->mbxCommand = MBX_UNREG_VPI;
931 mb->mbxOwner = OWN_HOST;
932 return;
933
934}
935
936
937
938
939
940
941
942
/**
 * lpfc_config_pcb_setup - Populate the Port Control Block ring descriptors
 * @phba: pointer to the HBA context object.
 *
 * Walks every SLI ring and records, in the PCB, each ring's command and
 * response IOCB entry counts and DMA addresses within the slim2p region.
 * Rings with no IOCBs configured get zeroed descriptors. iocbCnt tracks
 * the running offset into the shared IOCB array so the rings are laid
 * out back to back.
 */
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	PCB_t *pcbp = phba->pcb;
	dma_addr_t pdma_addr;
	uint32_t offset;
	uint32_t iocbCnt = 0;
	int i;

	pcbp->maxRing = (psli->num_rings - 1);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* IOCB sizes differ between SLI-2 and SLI-3 */
		pring->sli.sli3.sizeCiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
		pring->sli.sli3.sizeRiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
		/* A ring MUST have both cmd and rsp entries defined to be
		   usable by the HW.                                         */
		if ((pring->sli.sli3.numCiocb == 0) ||
			(pring->sli.sli3.numRiocb == 0)) {
			pcbp->rdsc[i].cmdEntries = 0;
			pcbp->rdsc[i].rspEntries = 0;
			pcbp->rdsc[i].cmdAddrHigh = 0;
			pcbp->rdsc[i].rspAddrHigh = 0;
			pcbp->rdsc[i].cmdAddrLow = 0;
			pcbp->rdsc[i].rspAddrLow = 0;
			pring->sli.sli3.cmdringaddr = NULL;
			pring->sli.sli3.rspringaddr = NULL;
			continue;
		}
		/* Command ring setup for ring */
		pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
		pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;

		/* DMA address = slim2p base + offset of this ring's IOCBs */
		offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
			 (uint8_t *) phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numCiocb;

		/* Response ring setup for ring */
		pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];

		pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
			 (uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numRiocb;
	}
}
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017void
1018lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1019{
1020 MAILBOX_t *mb = &pmb->u.mb;
1021 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1022 mb->un.varRdRev.cv = 1;
1023 mb->un.varRdRev.v3req = 1;
1024 mb->mbxCommand = MBX_READ_REV;
1025 mb->mbxOwner = OWN_HOST;
1026 return;
1027}
1028
1029void
1030lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1031{
1032 MAILBOX_t *mb = &pmb->u.mb;
1033 struct lpfc_mqe *mqe;
1034
1035 switch (mb->mbxCommand) {
1036 case MBX_READ_REV:
1037 mqe = &pmb->u.mqe;
1038 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
1039 mqe->un.read_rev.fw_name, 16);
1040 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
1041 mqe->un.read_rev.ulp_fw_name, 16);
1042 break;
1043 default:
1044 break;
1045 }
1046 return;
1047}
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059static void
1060lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
1061 struct lpfc_hbq_init *hbq_desc)
1062{
1063 hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
1064 hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
1065 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
1066}
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078static void
1079lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
1080 struct lpfc_hbq_init *hbq_desc)
1081{
1082 hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
1083 hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
1084 hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
1085 hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
1086 memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
1087 sizeof(hbqmb->profiles.profile3.cmdmatch));
1088}
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101static void
1102lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
1103 struct lpfc_hbq_init *hbq_desc)
1104{
1105 hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
1106 hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
1107 hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
1108 hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
1109 memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
1110 sizeof(hbqmb->profiles.profile5.cmdmatch));
1111}
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
/**
 * lpfc_config_hbq - Prepare a CONFIG_HBQ mailbox command
 * @phba: pointer to the HBA context object.
 * @id: HBQ identifier to configure.
 * @hbq_desc: driver HBQ descriptor with entry counts, masks and profile.
 * @hbq_entry_index: index of this HBQ's first entry in the hbqslimp area.
 * @pmb: mailbox command to prepare.
 *
 * Builds an MBX_CONFIG_HBQ request describing one host buffer queue:
 * entry count, receive-notify, ring mask, profile-specific fields and
 * the DMA address of the HBQ entries within hbqslimp.
 */
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
		 struct lpfc_hbq_init *hbq_desc,
		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	hbqmb->hbqId = id;
	hbqmb->entry_count = hbq_desc->entry_count;
	hbqmb->recvNotify = hbq_desc->rn;	/* notify host on receive */
	/* # of mask entries used below */
	hbqmb->numMask    = hbq_desc->mask_count;
	/* 0 = all commands, 2/3/5 = filtered profiles */
	hbqmb->profile    = hbq_desc->profile;
	/* Binds ring numbers to this HBQ (bit per ring) */
	hbqmb->ringMask   = hbq_desc->ring_mask;
	/* 0 = all, otherwise header-length filter */
	hbqmb->headerLen  = hbq_desc->headerLen;
	/* 0 = no, 1 = log this HBQ's entries */
	hbqmb->logEntry   = hbq_desc->logEntry;

	/* DMA address of this HBQ's entries within the hbqslimp region;
	 * only the low word carries the entry offset.
	 */
	hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
		hbq_entry_index * sizeof(struct lpfc_hbq_entry);
	hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);

	mb->mbxCommand = MBX_CONFIG_HBQ;
	mb->mbxOwner = OWN_HOST;

	/* Copy the profile-specific info (only profiles 2, 3 and 5 carry
	 * extra fields).
	 */
	if (hbq_desc->profile == 2)
		lpfc_build_hbq_profile2(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 3)
		lpfc_build_hbq_profile3(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 5)
		lpfc_build_hbq_profile5(hbqmb, hbq_desc);

	/* Return if no rctl / type masks for this HBQ */
	if (!hbq_desc->mask_count)
		return;

	/* Otherwise we setup specific rctl / type masks for this HBQ */
	for (i = 0; i < hbq_desc->mask_count; i++) {
		hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
		hbqmb->hbqMasks[i].tmask  = hbq_desc->hbqMasks[i].tmask;
		hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
		hbqmb->hbqMasks[i].rctlmask  = hbq_desc->hbqMasks[i].rctlmask;
	}

	return;
}
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
/**
 * lpfc_config_ring - Prepare a CONFIG_RING mailbox command
 * @phba: pointer to the HBA context object.
 * @ring: ring number to configure.
 * @pmb: mailbox command to prepare.
 *
 * Builds an MBX_CONFIG_RING request for @ring. If the ring uses a
 * profile, only the profile number is sent; otherwise the per-ring
 * rctl/type match registers are programmed from the ring's prt table.
 */
void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varCfgRing.ring = ring;
	mb->un.varCfgRing.maxOrigXchg = 0;
	mb->un.varCfgRing.maxRespXchg = 0;
	mb->un.varCfgRing.recvNotify = 1;

	psli = &phba->sli;
	pring = &psli->ring[ring];
	mb->un.varCfgRing.numMask = pring->num_mask;
	mb->mbxCommand = MBX_CONFIG_RING;
	mb->mbxOwner = OWN_HOST;

	/* Is this ring configured for a specific profile */
	if (pring->prt[0].profile) {
		mb->un.varCfgRing.profile = pring->prt[0].profile;
		return;
	}

	/* Otherwise we setup specific rctl / type masks for this ring */
	for (i = 0; i < pring->num_mask; i++) {
		mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
		/* ELS request frames match on all but the low bit of rctl
		 * so that both solicited and unsolicited ELS are caught.
		 */
		if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
			mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
		else
			mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
		mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
		mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
	}

	return;
}
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
/**
 * lpfc_config_port - Prepare a CONFIG_PORT mailbox command
 * @phba: pointer to the HBA context object.
 * @pmb: mailbox command to prepare.
 *
 * Builds the MBX_CONFIG_PORT request and, as a side effect, initializes
 * the host's Port Control Block (phba->pcb): mailbox, host/port group
 * pointer and ring descriptor addresses within the slim2p DMA region.
 * Also decides the SLI mode: if SLI-3 features are unavailable the
 * driver falls back to SLI-2. The PCB is byte-swapped in place at the
 * end so the HBA can read it directly.
 */
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
	MAILBOX_t *mb = &pmb->u.mb;
	dma_addr_t pdma_addr;
	uint32_t bar_low, bar_high;
	size_t offset;
	struct lpfc_hgp hgp;
	int i;
	uint32_t pgp_offset;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;

	mb->un.varCfgPort.pcbLen = sizeof(PCB_t);

	/* Physical address of the PCB within the slim2p region */
	offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
	mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);

	/* Always Host Group Pointer is in SLIM */
	mb->un.varCfgPort.hps = 1;

	/* If HBA supports SLI=3 ask for it */

	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
		if (phba->cfg_enable_bg)
			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
		if (phba->cfg_enable_dss)
			mb->un.varCfgPort.cdss = 1; /* Configure Security */
		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
		if (phba->max_vpi && phba->cfg_enable_npiv &&
		    phba->vpd.sli3Feat.cmv) {
			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
			mb->un.varCfgPort.cmv = 1;
		} else
			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
	} else
		phba->sli_rev = LPFC_SLI_REV2;
	mb->un.varCfgPort.sli_mode = phba->sli_rev;

	/* If this is an SLI3 port, configure async status notification. */
	if (phba->sli_rev == LPFC_SLI_REV3)
		mb->un.varCfgPort.casabt = 1;

	/* Now setup pcb */
	phba->pcb->type = TYPE_NATIVE_SLI2;
	phba->pcb->feature = FEATURE_INITIAL_SLI2;

	/* Setup Mailbox pointers */
	phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
	offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);

	/* The host group pointers (hgp) live either in host memory (the
	 * mbox structure within slim2p) or in the HBA's SLIM, depending
	 * on configuration and SLI mode. The BARs are read so the PCB can
	 * be given the bus address of SLIM-resident pointers.
	 */
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);

	/* Host-memory hgp only valid for non-SLI3 */
	if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
		phba->host_gp = &phba->mbox->us.s2.host[0];
		phba->hbq_put = NULL;
		offset = (uint8_t *)&phba->mbox->us.s2.host -
			(uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
		phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
	} else {
		/* Always Host Group Pointer is in SLIM */
		mb->un.varCfgPort.hps = 1;

		if (phba->sli_rev == 3) {
			phba->host_gp = &mb_slim->us.s3.host[0];
			phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
		} else {
			phba->host_gp = &mb_slim->us.s2.host[0];
			phba->hbq_put = NULL;
		}

		/* mask off BAR0's flag bits 0 - 3 */
		phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
			(void __iomem *)phba->host_gp -
			(void __iomem *)phba->MBslimaddr;
		if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
			phba->pcb->hgpAddrHigh = bar_high;
		else
			phba->pcb->hgpAddrHigh = 0;
		/* write HGP data to SLIM at the required longword offset */
		memset(&hgp, 0, sizeof(struct lpfc_hgp));

		for (i = 0; i < phba->sli.num_rings; i++) {
			lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
				    sizeof(*phba->host_gp));
		}
	}

	/* Setup Port Group offset; layout differs between SLI-2 and SLI-3 */
	if (phba->sli_rev == 3)
		pgp_offset = offsetof(struct lpfc_sli2_slim,
				      mbx.us.s3_pgp.port);
	else
		pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
	pdma_addr = phba->slim2p.phys + pgp_offset;
	phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);

	/* Use callback routine to setp rings in the pcb */
	lpfc_config_pcb_setup(phba);

	/* special handling for LC HBAs */
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		uint32_t hbainit[5];

		lpfc_hba_init(phba, hbainit);

		memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
	}

	/* Swap PCB if needed */
	lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448void
1449lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1450{
1451 MAILBOX_t *mb = &pmb->u.mb;
1452
1453 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1454 mb->mbxCommand = MBX_KILL_BOARD;
1455 mb->mbxOwner = OWN_HOST;
1456 return;
1457}
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469void
1470lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
1471{
1472 struct lpfc_sli *psli;
1473
1474 psli = &phba->sli;
1475
1476 list_add_tail(&mbq->list, &psli->mboxq);
1477
1478 psli->mboxq_cnt++;
1479
1480 return;
1481}
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
/**
 * lpfc_mbox_get - Dequeue the next pending mailbox command
 * @phba: pointer to the HBA context object.
 *
 * Removes the head of the SLI layer's pending mailbox queue, decrementing
 * the queue count if one was present. Caller is responsible for any
 * required locking.
 *
 * Return: the dequeued mailbox command, or NULL if the queue was empty.
 */
LPFC_MBOXQ_t *
lpfc_mbox_get(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *mbq = NULL;
	struct lpfc_sli *psli = &phba->sli;

	/* list_remove_head leaves mbq NULL when the list is empty */
	list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
	if (mbq)
		psli->mboxq_cnt--;

	return mbq;
}
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
/**
 * __lpfc_mbox_cmpl_put - Queue a completed mailbox (lock already held)
 * @phba: pointer to the HBA context object.
 * @mbq: completed mailbox command to enqueue.
 *
 * Appends @mbq to the mailbox completion queue. Caller must hold
 * hbalock; use lpfc_mbox_cmpl_put() for the locking variant.
 */
void
__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
}
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536void
1537lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1538{
1539 unsigned long iflag;
1540
1541
1542 spin_lock_irqsave(&phba->hbalock, iflag);
1543 __lpfc_mbox_cmpl_put(phba, mbq);
1544 spin_unlock_irqrestore(&phba->hbalock, iflag);
1545 return;
1546}
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559int
1560lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1561{
1562
1563
1564
1565 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1566 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1567 if (!mboxq->vport) {
1568 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1569 "1814 Mbox x%x failed, no vport\n",
1570 mboxq->u.mb.mbxCommand);
1571 dump_stack();
1572 return -ENODEV;
1573 }
1574 }
1575 return 0;
1576}
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588int
1589lpfc_mbox_dev_check(struct lpfc_hba *phba)
1590{
1591
1592 if (unlikely(pci_channel_offline(phba->pcidev)))
1593 return -ENODEV;
1594
1595
1596 if (phba->link_state == LPFC_HBA_ERROR)
1597 return -ENODEV;
1598
1599 return 0;
1600}
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
/**
 * lpfc_mbox_tmo_val - Select the timeout for a mailbox command
 * @phba: pointer to the HBA context object.
 * @mboxq: mailbox command whose timeout is needed.
 *
 * Flash/NVRAM commands get the long flash timeout; SLI4_CONFIG
 * commands get a per-subsystem/opcode timeout (object and profile
 * management opcodes get the extended value); everything else gets the
 * default mailbox timeout.
 *
 * Return: timeout value (in seconds) for the given command.
 */
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	MAILBOX_t *mbox = &mboxq->u.mb;
	uint8_t subsys, opcode;

	switch (mbox->mbxCommand) {
	case MBX_WRITE_NV:	/* 0x03 */
	case MBX_DUMP_MEMORY:	/* 0x17 */
	case MBX_UPDATE_CFG:	/* 0x1B */
	case MBX_DOWN_LOAD:	/* 0x1C */
	case MBX_DEL_LD_ENTRY:	/* 0x1D */
	case MBX_WRITE_VPARMS:	/* 0x32 */
	case MBX_LOAD_AREA:	/* 0x81 */
	case MBX_WRITE_WWN:     /* 0x98 */
	case MBX_LOAD_EXP_ROM:	/* 0x9C */
	case MBX_ACCESS_VDATA:	/* 0xA5 */
		return LPFC_MBOX_TMO_FLASH_CMD;
	case MBX_SLI4_CONFIG:	/* 0x9b */
		subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
		opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
		if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
			switch (opcode) {
			case LPFC_MBOX_OPCODE_READ_OBJECT:
			case LPFC_MBOX_OPCODE_WRITE_OBJECT:
			case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
			case LPFC_MBOX_OPCODE_DELETE_OBJECT:
			case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
			case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
			case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
			case LPFC_MBOX_OPCODE_RESET_LICENSES:
			case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
			case LPFC_MBOX_OPCODE_GET_VPD_DATA:
			case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
			switch (opcode) {
			case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		return LPFC_MBOX_SLI4_CONFIG_TMO;
	}
	return LPFC_MBOX_TMO;
}
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675void
1676lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1677 dma_addr_t phyaddr, uint32_t length)
1678{
1679 struct lpfc_mbx_nembed_cmd *nembed_sge;
1680
1681 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1682 &mbox->u.mqe.un.nembed_cmd;
1683 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1684 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1685 nembed_sge->sge[sgentry].length = length;
1686}
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696void
1697lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1698 struct lpfc_mbx_sge *sge)
1699{
1700 struct lpfc_mbx_nembed_cmd *nembed_sge;
1701
1702 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1703 &mbox->u.mqe.un.nembed_cmd;
1704 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1705 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1706 sge->length = nembed_sge->sge[sgentry].length;
1707}
1708
1709
1710
1711
1712
1713
1714
1715
1716void
1717lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1718{
1719 struct lpfc_mbx_sli4_config *sli4_cfg;
1720 struct lpfc_mbx_sge sge;
1721 dma_addr_t phyaddr;
1722 uint32_t sgecount, sgentry;
1723
1724 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1725
1726
1727 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1728 mempool_free(mbox, phba->mbox_mem_pool);
1729 return;
1730 }
1731
1732
1733 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1734
1735 if (unlikely(!mbox->sge_array)) {
1736 mempool_free(mbox, phba->mbox_mem_pool);
1737 return;
1738 }
1739
1740 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1741 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1742 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1743 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
1744 mbox->sge_array->addr[sgentry], phyaddr);
1745 }
1746
1747 kfree(mbox->sge_array);
1748
1749 mempool_free(mbox, phba->mbox_mem_pool);
1750}
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
/**
 * lpfc_sli4_config - Build an SLI4_CONFIG mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to an already-allocated driver mailbox object.
 * @subsystem: SLI4 config subsystem the opcode belongs to.
 * @opcode: SLI4 config opcode to issue.
 * @length: requested payload length in bytes (includes the config header).
 * @emb: true (LPFC_SLI4_MBX_EMBED) for a payload embedded in the mailbox,
 *       false (LPFC_SLI4_MBX_NEMBED) for a payload in external DMA pages.
 *
 * For the non-embedded form, up to LPFC_SLI4_MBX_SGE_MAX_PAGES DMA pages
 * are allocated and described by sge entries in the mailbox; the caller
 * releases them via lpfc_sli4_mbox_cmd_free().
 *
 * Return: number of payload bytes actually set up.  May be less than
 * @length (including 0) if allocation fails; callers must check.
 */
int
lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
{
	struct lpfc_mbx_sli4_config *sli4_config;
	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
	uint32_t alloc_len;
	uint32_t resid_len;
	uint32_t pagen, pcount;
	void *viraddr;
	dma_addr_t phyaddr;

	/* Set up the SLI4 mailbox command header */
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);

	sli4_config = &mbox->u.mqe.un.sli4_config;

	/* Embedded payload: everything lives inside the mailbox itself */
	if (emb) {
		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
		sli4_config->header.cfg_mhdr.payload_length = length;

		bf_set(lpfc_mbox_hdr_opcode,
			&sli4_config->header.cfg_shdr.request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem,
			&sli4_config->header.cfg_shdr.request, subsystem);
		/* Request length excludes the command header itself */
		sli4_config->header.cfg_shdr.request.request_length =
			length - LPFC_MBX_CMD_HDR_LENGTH;
		return length;
	}

	/* Non-embedded: number of pages needed, capped at the sge limit */
	pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;

	/* Array holding the virtual address of each DMA page */
	mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
				  GFP_KERNEL);
	if (!mbox->sge_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2527 Failed to allocate non-embedded SGE "
				"array.\n");
		return 0;
	}
	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
		/* Allocate one zeroed DMA page and record it in both the
		 * sge entry (bus address) and sge_array (virtual address).
		 * On failure, stop with however many pages were obtained.
		 */
		viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
					      SLI4_PAGE_SIZE, &phyaddr,
					      GFP_KERNEL);
		if (!viraddr)
			break;
		mbox->sge_array->addr[pagen] = viraddr;
		/* The config shared header sits at the start of page 0 */
		if (pagen == 0)
			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
		resid_len = length - alloc_len;
		if (resid_len > SLI4_PAGE_SIZE) {
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      SLI4_PAGE_SIZE);
			alloc_len += SLI4_PAGE_SIZE;
		} else {
			/* Last (possibly partial) page */
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      resid_len);
			alloc_len = length;
		}
	}

	/* Record what was actually set up, even if short of the request */
	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);

	/* Fill in the shared header in page 0 if at least one page exists */
	if (pagen > 0) {
		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
		cfg_shdr->request.request_length =
				alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
	}

	/* In-place byte swap of the shared header for the hardware's
	 * byte order (no-op copy on matching-endian hosts).
	 */
	if (cfg_shdr)
		lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
				      sizeof(union lpfc_sli4_cfg_shdr));
	return alloc_len;
}
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874int
1875lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1876 uint16_t exts_count, uint16_t rsrc_type, bool emb)
1877{
1878 uint8_t opcode = 0;
1879 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
1880 void *virtaddr = NULL;
1881
1882
1883 if (emb == LPFC_SLI4_MBX_NEMBED) {
1884
1885 virtaddr = mbox->sge_array->addr[0];
1886 if (virtaddr == NULL)
1887 return 1;
1888 n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
1889 }
1890
1891
1892
1893
1894
1895 if (emb == LPFC_SLI4_MBX_EMBED)
1896 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1897 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1898 rsrc_type);
1899 else {
1900
1901 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1902 n_rsrc_extnt, rsrc_type);
1903 lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
1904 &n_rsrc_extnt->word4,
1905 sizeof(uint32_t));
1906 }
1907
1908
1909 opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
1910 switch (opcode) {
1911 case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
1912 if (emb == LPFC_SLI4_MBX_EMBED)
1913 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1914 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1915 exts_count);
1916 else
1917 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1918 n_rsrc_extnt, exts_count);
1919 break;
1920 case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
1921 case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
1922 case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
1923
1924 break;
1925 default:
1926 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1927 "2929 Resource Extent Opcode x%x is "
1928 "unsupported\n", opcode);
1929 return 1;
1930 }
1931
1932 return 0;
1933}
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945uint8_t
1946lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1947{
1948 struct lpfc_mbx_sli4_config *sli4_cfg;
1949 union lpfc_sli4_cfg_shdr *cfg_shdr;
1950
1951 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1952 return LPFC_MBOX_SUBSYSTEM_NA;
1953 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1954
1955
1956 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1957 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1958 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1959 }
1960
1961
1962 if (unlikely(!mbox->sge_array))
1963 return LPFC_MBOX_SUBSYSTEM_NA;
1964 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1965 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1966}
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978uint8_t
1979lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1980{
1981 struct lpfc_mbx_sli4_config *sli4_cfg;
1982 union lpfc_sli4_cfg_shdr *cfg_shdr;
1983
1984 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1985 return LPFC_MBOX_OPCODE_NA;
1986 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1987
1988
1989 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1990 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1991 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1992 }
1993
1994
1995 if (unlikely(!mbox->sge_array))
1996 return LPFC_MBOX_OPCODE_NA;
1997 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1998 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1999}
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012int
2013lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
2014 struct lpfcMboxq *mboxq,
2015 uint16_t fcf_index)
2016{
2017 void *virt_addr;
2018 uint8_t *bytep;
2019 struct lpfc_mbx_sge sge;
2020 uint32_t alloc_len, req_len;
2021 struct lpfc_mbx_read_fcf_tbl *read_fcf;
2022
2023 if (!mboxq)
2024 return -ENOMEM;
2025
2026 req_len = sizeof(struct fcf_record) +
2027 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
2028
2029
2030 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2031 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
2032 LPFC_SLI4_MBX_NEMBED);
2033
2034 if (alloc_len < req_len) {
2035 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2036 "0291 Allocated DMA memory size (x%x) is "
2037 "less than the requested DMA memory "
2038 "size (x%x)\n", alloc_len, req_len);
2039 return -ENOMEM;
2040 }
2041
2042
2043
2044
2045 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
2046 virt_addr = mboxq->sge_array->addr[0];
2047 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
2048
2049
2050 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
2051
2052 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
2053 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
2054
2055 return 0;
2056}
2057
2058
2059
2060
2061
2062
2063
2064
2065void
2066lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
2067{
2068
2069 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
2070 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
2071
2072
2073 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
2074 bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
2075
2076
2077 if (phba->cfg_enable_bg)
2078 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
2079
2080
2081 if (phba->max_vpi && phba->cfg_enable_npiv)
2082 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
2083
2084 return;
2085}
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098void
2099lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2100{
2101 struct lpfc_mbx_init_vfi *init_vfi;
2102
2103 memset(mbox, 0, sizeof(*mbox));
2104 mbox->vport = vport;
2105 init_vfi = &mbox->u.mqe.un.init_vfi;
2106 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
2107 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
2108 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
2109 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
2110 bf_set(lpfc_init_vfi_vfi, init_vfi,
2111 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2112 bf_set(lpfc_init_vfi_vpi, init_vfi,
2113 vport->phba->vpi_ids[vport->vpi]);
2114 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2115 vport->phba->fcf.fcfi);
2116}
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
/**
 * lpfc_reg_vfi - Build a REG_VFI mailbox command
 * @mbox: pointer to lpfc mbox command to set up.
 * @vport: vport to register against the fabric.
 * @phys: DMA bus address of the vport's service parameters buffer; pass 0
 *        to omit the payload BDE.
 *
 * Registers the vport's VFI/VPI against the current FCFI.  When the link
 * is native FC, the VFI is already registered, and the topology has not
 * changed, the command is turned into an update (upd=1, vp=0) instead of a
 * fresh registration.
 */
void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
	struct lpfc_mbx_reg_vfi *reg_vfi;
	struct lpfc_hba *phba = vport->phba;

	memset(mbox, 0, sizeof(*mbox));
	reg_vfi = &mbox->u.mqe.un.reg_vfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
	bf_set(lpfc_reg_vfi_vfi, reg_vfi,
	       phba->sli4_hba.vfi_ids[vport->vfi]);
	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
	bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
	/* Port name is carried as two explicitly little-endian words */
	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
	reg_vfi->e_d_tov = phba->fc_edtov;
	reg_vfi->r_a_tov = phba->fc_ratov;
	if (phys) {
		/* Optional BDE describing the service parameters payload */
		reg_vfi->bde.addrHigh = putPaddrHigh(phys);
		reg_vfi->bde.addrLow = putPaddrLow(phys);
		reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
		reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	}
	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);

	/* Native FC with an already-registered VFI and unchanged topology:
	 * issue an update rather than a new registration.
	 */
	if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
	    (vport->fc_flag & FC_VFI_REGISTERED) &&
	    (!phba->fc_topology_changed)) {
		bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
		bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
			"3134 Register VFI, mydid:x%x, fcfi:%d, "
			" vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
			" port_state:x%x topology chg:%d\n",
			vport->fc_myDID,
			phba->fcf.fcfi,
			phba->sli4_hba.vfi_ids[vport->vfi],
			phba->vpi_ids[vport->vpi],
			reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
			vport->port_state, phba->fc_topology_changed);
}
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187void
2188lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
2189{
2190 memset(mbox, 0, sizeof(*mbox));
2191 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
2192 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2193 phba->vpi_ids[vpi]);
2194 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2195 phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2196}
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209void
2210lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2211{
2212 memset(mbox, 0, sizeof(*mbox));
2213 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2214 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2215 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2216}
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226int
2227lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2228{
2229 struct lpfc_dmabuf *mp = NULL;
2230 MAILBOX_t *mb;
2231
2232 memset(mbox, 0, sizeof(*mbox));
2233 mb = &mbox->u.mb;
2234
2235 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2236 if (mp)
2237 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2238
2239 if (!mp || !mp->virt) {
2240 kfree(mp);
2241
2242 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2243 "2569 lpfc dump config region 23: memory"
2244 " allocation failed\n");
2245 return 1;
2246 }
2247
2248 memset(mp->virt, 0, LPFC_BPL_SIZE);
2249 INIT_LIST_HEAD(&mp->list);
2250
2251
2252 mbox->context1 = (uint8_t *) mp;
2253
2254 mb->mbxCommand = MBX_DUMP_MEMORY;
2255 mb->un.varDmp.type = DMP_NV_PARAMS;
2256 mb->un.varDmp.region_id = DMP_REGION_23;
2257 mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
2258 mb->un.varWords[3] = putPaddrLow(mp->phys);
2259 mb->un.varWords[4] = putPaddrHigh(mp->phys);
2260 return 0;
2261}
2262
2263static void
2264lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2265{
2266 MAILBOX_t *mb;
2267 int rc = FAILURE;
2268 struct lpfc_rdp_context *rdp_context =
2269 (struct lpfc_rdp_context *)(mboxq->context2);
2270
2271 mb = &mboxq->u.mb;
2272 if (mb->mbxStatus)
2273 goto mbx_failed;
2274
2275 memcpy(&rdp_context->link_stat, &mb->un.varRdLnk, sizeof(READ_LNK_VAR));
2276
2277 rc = SUCCESS;
2278
2279mbx_failed:
2280 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2281 rdp_context->cmpl(phba, rdp_context, rc);
2282}
2283
/**
 * lpfc_mbx_cmpl_rdp_page_a2 - Completion handler for the SFF page A2 dump
 * @phba: pointer to lpfc hba data structure.
 * @mbox: completed DUMP_MEMORY mailbox command.
 *
 * Copies the dumped page A2 data into the RDP context, frees the DMA
 * buffer, and reuses @mbox to issue a READ_LNK_STAT command whose
 * completion (lpfc_mbx_cmpl_rdp_link_stat) finishes the RDP sequence.  On
 * any failure, resources are released and the RDP completion callback is
 * invoked with FAILURE.
 */
static void
lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1;
	struct lpfc_rdp_context *rdp_context =
			(struct lpfc_rdp_context *)(mbox->context2);

	if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
		goto error_mbuf_free;

	/* Copy the page A2 data out of the DMA buffer (byte-swapping copy) */
	lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
				DMP_SFF_PAGE_A2_SIZE);

	/* The DMA buffer is no longer needed once the data is copied */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);

	/* Reuse the mailbox for the follow-on READ_LNK_STAT command */
	memset(mbox, 0, sizeof(*mbox));
	lpfc_read_lnk_stat(phba, mbox);
	mbox->vport = rdp_context->ndlp->vport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
	mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
		goto error_cmd_free;
	/* NOTE: the DMA buffer was already freed above, so the issue-failure
	 * path must skip the mbuf-free label and only free the mailbox.
	 */

	return;

error_mbuf_free:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
error_cmd_free:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	rdp_context->cmpl(phba, rdp_context, FAILURE);
}
2318
/**
 * lpfc_mbx_cmpl_rdp_page_a0 - Completion handler for the SFF page A0 dump
 * @phba: pointer to lpfc hba data structure.
 * @mbox: completed DUMP_MEMORY mailbox command.
 *
 * Copies the dumped page A0 data into the RDP context, then reuses both
 * @mbox and the DMA buffer to issue the follow-on page A2 dump, completed
 * by lpfc_mbx_cmpl_rdp_page_a2.  On any failure, resources are released
 * and the RDP completion callback is invoked with FAILURE.
 */
void
lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	int rc;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (mbox->context1);
	struct lpfc_rdp_context *rdp_context =
			(struct lpfc_rdp_context *)(mbox->context2);

	if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
		goto error;

	/* Copy the page A0 data out of the DMA buffer (byte-swapping copy) */
	lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0,
				DMP_SFF_PAGE_A0_SIZE);

	/* Reuse the mailbox and the DMA buffer for the page A2 dump */
	memset(mbox, 0, sizeof(*mbox));

	memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* save address for completion */
	mbox->context1 = mp;
	mbox->vport = rdp_context->ndlp->vport;

	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
	bf_set(lpfc_mbx_memory_dump_type3_type,
		&mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
	bf_set(lpfc_mbx_memory_dump_type3_link,
		&mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
	bf_set(lpfc_mbx_memory_dump_type3_page_no,
		&mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2);
	bf_set(lpfc_mbx_memory_dump_type3_length,
		&mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
	mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
	mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);

	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
	mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto error;

	return;

error:
	/* Single unwind path: the DMA buffer is still owned here on both
	 * failure paths (dump status error, or issue failure of the reused
	 * mailbox before the completion handler could run).
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	rdp_context->cmpl(phba, rdp_context, FAILURE);
}
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378int
2379lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2380{
2381 struct lpfc_dmabuf *mp = NULL;
2382
2383 memset(mbox, 0, sizeof(*mbox));
2384
2385 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2386 if (mp)
2387 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2388 if (!mp || !mp->virt) {
2389 kfree(mp);
2390 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2391 "3569 dump type 3 page 0xA0 allocation failed\n");
2392 return 1;
2393 }
2394
2395 memset(mp->virt, 0, LPFC_BPL_SIZE);
2396 INIT_LIST_HEAD(&mp->list);
2397
2398 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
2399
2400 mbox->context1 = mp;
2401
2402 bf_set(lpfc_mbx_memory_dump_type3_type,
2403 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
2404 bf_set(lpfc_mbx_memory_dump_type3_link,
2405 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
2406 bf_set(lpfc_mbx_memory_dump_type3_page_no,
2407 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
2408 bf_set(lpfc_mbx_memory_dump_type3_length,
2409 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
2410 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
2411 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
2412
2413 return 0;
2414}
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429void
2430lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2431{
2432 struct lpfc_mbx_reg_fcfi *reg_fcfi;
2433
2434 memset(mbox, 0, sizeof(*mbox));
2435 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
2436 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
2437 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
2438 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
2439 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
2440 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
2441 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
2442 phba->fcf.current_rec.fcf_indx);
2443
2444 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
2445 if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
2446 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
2447 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2448 phba->fcf.current_rec.vlan_id);
2449 }
2450}
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460void
2461lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2462{
2463 memset(mbox, 0, sizeof(*mbox));
2464 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
2465 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
2466}
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476void
2477lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2478{
2479 struct lpfc_hba *phba = ndlp->phba;
2480 struct lpfc_mbx_resume_rpi *resume_rpi;
2481
2482 memset(mbox, 0, sizeof(*mbox));
2483 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2484 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2485 bf_set(lpfc_resume_rpi_index, resume_rpi,
2486 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2487 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2488 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2489}
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499void
2500lpfc_supported_pages(struct lpfcMboxq *mbox)
2501{
2502 struct lpfc_mbx_supp_pages *supp_pages;
2503
2504 memset(mbox, 0, sizeof(*mbox));
2505 supp_pages = &mbox->u.mqe.un.supp_pages;
2506 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2507 bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
2508}
2509
2510
2511
2512
2513
2514
2515
2516
2517void
2518lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
2519{
2520 struct lpfc_mbx_pc_sli4_params *sli4_params;
2521
2522 memset(mbox, 0, sizeof(*mbox));
2523 sli4_params = &mbox->u.mqe.un.sli4_params;
2524 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2525 bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
2526}
2527