1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_transport_fc.h>
29#include <scsi/scsi.h>
30#include <scsi/fc/fc_fs.h>
31
32#include "lpfc_hw4.h"
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_sli4.h"
36#include "lpfc_nl.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_compat.h"
43
44
45
46
47
48
49
50
51
52
53
54
55
/**
 * lpfc_dump_static_vport - Prepare a DUMP_MEMORY mailbox command for the
 * static vport region.
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @offset: entry index to start dumping from.
 *
 * Builds a MBX_DUMP_MEMORY request against DMP_REGION_VPORT.  On SLI
 * revisions before SLI-4 the response is returned inline in the mailbox;
 * on SLI-4 a DMA buffer is allocated, attached to pmb->context1, and the
 * dump is DMAed into it (presumably freed by the completion handler —
 * TODO confirm ownership against the callers of this function).
 *
 * Return: 0 on success, 1 if the SLI-4 DMA buffer could not be allocated.
 */
int
lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
		uint16_t offset)
{
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;

	/* Take the mailbox pointer first; memset below wipes pmb contents */
	mb = &pmb->u.mb;

	/* Setup to dump vport info region */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = DMP_REGION_VPORT;
	mb->mbxOwner = OWN_HOST;

	/* Pre-SLI-4: response comes back inline in the mailbox words */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		mb->un.varDmp.cv = 1;
		mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
		return 0;
	}

	/* SLI-4: allocate a DMA buffer to receive the dump */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	if (!mp || !mp->virt) {
		/* kfree(NULL) is a no-op, so this covers both failure cases */
		kfree(mp);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"2605 lpfc_dump_static_vport: memory"
			" allocation failed\n");
		return 1;
	}
	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* save address for completion */
	pmb->context1 = (uint8_t *)mp;
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);

	return 0;
}
102
103
104
105
106
107
108
109
110void
111lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
112{
113 MAILBOX_t *mb;
114 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
115 mb = &pmb->u.mb;
116 mb->mbxCommand = MBX_DOWN_LINK;
117 mb->mbxOwner = OWN_HOST;
118}
119
120
121
122
123
124
125
126
127
128
129
130
131
/**
 * lpfc_dump_mem - Prepare a DUMP_MEMORY mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @offset: entry index to start dumping from.
 * @region_id: id of the memory region to dump.
 *
 * Builds a MBX_DUMP_MEMORY request for the given region and offset with
 * the response returned inline in the mailbox (DMP_RSP_SIZE words).
 */
void
lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
		uint16_t region_id)
{
	MAILBOX_t *mb;
	void *ctx;

	mb = &pmb->u.mb;
	/* The memset below wipes the whole queue element; preserve the
	 * caller's context2 across it and restore it afterwards.
	 */
	ctx = pmb->context2;

	/* Setup to dump mailbox region */
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.cv = 1;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = region_id;
	mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
	mb->un.varDmp.co = 0;
	mb->un.varDmp.resp_offset = 0;
	pmb->context2 = ctx;
	mb->mbxOwner = OWN_HOST;
	return;
}
156
157
158
159
160
161
162
163
164
/**
 * lpfc_dump_wakeup_param - Prepare a mailbox command to dump the HBA's
 * wakeup parameters region.
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_DUMP_MEMORY request for WAKE_UP_PARMS_REGION_ID with the
 * response returned inline in the mailbox (WAKE_UP_PARMS_WORD_SIZE words).
 */
void
lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	void *ctx;

	mb = &pmb->u.mb;
	/* The memset below wipes the whole queue element; preserve the
	 * caller's context2 across it and restore it afterwards.
	 */
	ctx = pmb->context2;

	/* Setup to dump the wakeup parameters region */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->mbxOwner = OWN_HOST;
	mb->un.varDmp.cv = 1;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = 0;
	mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
	mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
	mb->un.varDmp.co = 0;
	mb->un.varDmp.resp_offset = 0;
	pmb->context2 = ctx;
	return;
}
189
190
191
192
193
194
195
196
197
198
199
200
201void
202lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
203{
204 MAILBOX_t *mb;
205
206 mb = &pmb->u.mb;
207 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
208 mb->mbxCommand = MBX_READ_NV;
209 mb->mbxOwner = OWN_HOST;
210 return;
211}
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226void
227lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
228 uint32_t ring)
229{
230 MAILBOX_t *mb;
231
232 mb = &pmb->u.mb;
233 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
234 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
235 mb->un.varCfgAsyncEvent.ring = ring;
236 mb->mbxOwner = OWN_HOST;
237 return;
238}
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253void
254lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
255{
256 MAILBOX_t *mb;
257
258 mb = &pmb->u.mb;
259 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
260 mb->mbxCommand = MBX_HEARTBEAT;
261 mb->mbxOwner = OWN_HOST;
262 return;
263}
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286int
287lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
288 struct lpfc_dmabuf *mp)
289{
290 MAILBOX_t *mb;
291 struct lpfc_sli *psli;
292
293 psli = &phba->sli;
294 mb = &pmb->u.mb;
295 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
296
297 INIT_LIST_HEAD(&mp->list);
298 mb->mbxCommand = MBX_READ_TOPOLOGY;
299 mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
300 mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
301 mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
302
303
304
305
306 pmb->context1 = (uint8_t *)mp;
307 mb->mbxOwner = OWN_HOST;
308 return (0);
309}
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326void
327lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
328{
329 MAILBOX_t *mb;
330
331 mb = &pmb->u.mb;
332 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
333
334 mb->un.varClearLA.eventTag = phba->fc_eventTag;
335 mb->mbxCommand = MBX_CLEAR_LA;
336 mb->mbxOwner = OWN_HOST;
337 return;
338}
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
/**
 * lpfc_config_link - Prepare a CONFIG_LINK mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_CONFIG_LINK request from the physical port's identity
 * (fc_myDID) and the HBA's current FC timeout values.  Coalescing
 * (cr/ci) is only enabled when the cfg_cr_delay module setting is
 * non-zero; ACK0 support is enabled when cfg_ack0 is set.
 */
void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_vport *vport = phba->pport;
	MAILBOX_t *mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* Coalescing of responses: only configured when a delay is set */
	if (phba->cfg_cr_delay) {
		mb->un.varCfgLnk.cr = 1;
		mb->un.varCfgLnk.ci = 1;
		mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
		mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
	}

	/* Port identity and the full set of FC timeout values */
	mb->un.varCfgLnk.myId = vport->fc_myDID;
	mb->un.varCfgLnk.edtov = phba->fc_edtov;
	mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
	mb->un.varCfgLnk.ratov = phba->fc_ratov;
	mb->un.varCfgLnk.rttov = phba->fc_rttov;
	mb->un.varCfgLnk.altov = phba->fc_altov;
	mb->un.varCfgLnk.crtov = phba->fc_crtov;
	mb->un.varCfgLnk.citov = phba->fc_citov;

	if (phba->cfg_ack0)
		mb->un.varCfgLnk.ack0_enable = 1;

	mb->mbxCommand = MBX_CONFIG_LINK;
	mb->mbxOwner = OWN_HOST;
	return;
}
387
388
389
390
391
392
393
394
395
396
397
398
399
400
/**
 * lpfc_config_msi - Prepare a mailbox command for configuring MSI-X
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_CONFIG_MSI request mapping host attention conditions to
 * MSI-X message numbers.  Requires cfg_use_msi == 2 (MSI-X mode) and an
 * SLI-3 or later HBA.
 *
 * Return: 0 on success, -EINVAL if the configuration prerequisites are
 * not met.
 */
int
lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t attentionConditions[2];

	/* Sanity check: this command is only valid in MSI-X mode */
	if (phba->cfg_use_msi != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0475 Not configured for supporting MSI-X "
				"cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
		return -EINVAL;
	}

	/* Sanity check: requires SLI-3 or later */
	if (phba->sli_rev < 3) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0476 HBA not supporting SLI-3 or later "
				"SLI Revision: 0x%x\n", phba->sli_rev);
		return -EINVAL;
	}

	/* Clear the mailbox command fields */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	/* Attention conditions that shall be routed to MSI messages:
	 * all ring attentions, error attention, link attention and
	 * mailbox attention.
	 */
	attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
				  HA_LATT | HA_MBATT);
	attentionConditions[1] = 0;

	mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
	mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];

	/* Map the HA attention positions to message numbers; on
	 * little-endian the byte lane within the word is adjusted (^3).
	 */
#ifdef __BIG_ENDIAN_BITFIELD
	/* RA0 (FCP Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
	/* RA1 (Other Protocol Extra Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
#else
	/* RA0 (FCP Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
	/* RA1 (Other Protocol Extra Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
#endif

	mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
	mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];

	/* NOTE(review): the autoClearHA values programmed just above are
	 * immediately overwritten with zero here, leaving HA autoclear
	 * disabled.  This looks deliberate (autoclear disabled) but the
	 * dead stores above should be confirmed/cleaned up.
	 */
	mb->un.varCfgMSI.autoClearHA[0] = 0;
	mb->un.varCfgMSI.autoClearHA[1] = 0;

	/* Set mailbox command fields */
	mb->mbxCommand = MBX_CONFIG_MSI;
	mb->mbxOwner = OWN_HOST;

	return 0;
}
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480void
481lpfc_init_link(struct lpfc_hba * phba,
482 LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
483{
484 lpfc_vpd_t *vpd;
485 struct lpfc_sli *psli;
486 MAILBOX_t *mb;
487
488 mb = &pmb->u.mb;
489 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
490
491 psli = &phba->sli;
492 switch (topology) {
493 case FLAGS_TOPOLOGY_MODE_LOOP_PT:
494 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
495 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
496 break;
497 case FLAGS_TOPOLOGY_MODE_PT_PT:
498 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
499 break;
500 case FLAGS_TOPOLOGY_MODE_LOOP:
501 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
502 break;
503 case FLAGS_TOPOLOGY_MODE_PT_LOOP:
504 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
505 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
506 break;
507 case FLAGS_LOCAL_LB:
508 mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
509 break;
510 }
511
512
513 mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
514
515
516
517
518 vpd = &phba->vpd;
519 if (vpd->rev.feaLevelHigh >= 0x02){
520 switch(linkspeed){
521 case LPFC_USER_LINK_SPEED_1G:
522 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
523 mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
524 break;
525 case LPFC_USER_LINK_SPEED_2G:
526 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
527 mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
528 break;
529 case LPFC_USER_LINK_SPEED_4G:
530 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
531 mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
532 break;
533 case LPFC_USER_LINK_SPEED_8G:
534 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
535 mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
536 break;
537 case LPFC_USER_LINK_SPEED_10G:
538 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
539 mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
540 break;
541 case LPFC_USER_LINK_SPEED_16G:
542 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
543 mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
544 break;
545 case LPFC_USER_LINK_SPEED_AUTO:
546 default:
547 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
548 break;
549 }
550
551 }
552 else
553 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
554
555 mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
556 mb->mbxOwner = OWN_HOST;
557 mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
558 return;
559}
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582int
583lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
584{
585 struct lpfc_dmabuf *mp;
586 MAILBOX_t *mb;
587 struct lpfc_sli *psli;
588
589 psli = &phba->sli;
590 mb = &pmb->u.mb;
591 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
592
593 mb->mbxOwner = OWN_HOST;
594
595
596
597 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
598 if (mp)
599 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
600 if (!mp || !mp->virt) {
601 kfree(mp);
602 mb->mbxCommand = MBX_READ_SPARM64;
603
604 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
605 "0301 READ_SPARAM: no buffers\n");
606 return (1);
607 }
608 INIT_LIST_HEAD(&mp->list);
609 mb->mbxCommand = MBX_READ_SPARM64;
610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
613 if (phba->sli_rev >= LPFC_SLI_REV3)
614 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
615
616
617 pmb->context1 = mp;
618
619 return (0);
620}
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637void
638lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
639 LPFC_MBOXQ_t * pmb)
640{
641 MAILBOX_t *mb;
642
643 mb = &pmb->u.mb;
644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
645
646 mb->un.varUnregDID.did = did;
647 mb->un.varUnregDID.vpi = vpi;
648 if ((vpi != 0xffff) &&
649 (phba->sli_rev == LPFC_SLI_REV4))
650 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
651
652 mb->mbxCommand = MBX_UNREG_D_ID;
653 mb->mbxOwner = OWN_HOST;
654 return;
655}
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670void
671lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
672{
673 MAILBOX_t *mb;
674
675 mb = &pmb->u.mb;
676 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
677
678 mb->mbxCommand = MBX_READ_CONFIG;
679 mb->mbxOwner = OWN_HOST;
680 return;
681}
682
683
684
685
686
687
688
689
690
691
692
693
694
695void
696lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
697{
698 MAILBOX_t *mb;
699
700 mb = &pmb->u.mb;
701 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
702
703 mb->mbxCommand = MBX_READ_LNK_STAT;
704 mb->mbxOwner = OWN_HOST;
705 return;
706}
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
/**
 * lpfc_reg_rpi - Prepare a REG_LOGIN mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @did: remote port identifier being registered.
 * @param: pointer to the remote port's service parameters.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @rpi: rpi to register (translated via rpi_ids on SLI-4).
 *
 * Allocates a DMA buffer, copies the service parameters into it, builds
 * a MBX_REG_LOGIN64 request whose BDE points at the buffer, and saves
 * the buffer in pmb->context1 for the completion path.
 *
 * Return: 0 on success, 1 if the DMA buffer could not be allocated.
 */
int
lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint8_t *sparam;
	struct lpfc_dmabuf *mp;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* rpi/vpi are translated to physical indices per SLI revision */
	mb->un.varRegLogin.rpi = 0;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
	mb->un.varRegLogin.did = did;
	mb->mbxOwner = OWN_HOST;

	/* Get a buffer to hold NPort's service parameters */
	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		/* kfree(NULL) is a no-op, so this covers both failures */
		kfree(mp);
		mb->mbxCommand = MBX_REG_LOGIN64;
		/* REG_LOGIN: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
				"rpi x%x\n", vpi, did, rpi);
		return 1;
	}
	INIT_LIST_HEAD(&mp->list);
	sparam = mp->virt;

	/* Copy param's into a new buffer */
	memcpy(sparam, param, sizeof (struct serv_parm));

	/* save address for completion */
	pmb->context1 = (uint8_t *) mp;

	mb->mbxCommand = MBX_REG_LOGIN64;
	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
	mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);

	return 0;
}
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796void
797lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
798 LPFC_MBOXQ_t * pmb)
799{
800 MAILBOX_t *mb;
801
802 mb = &pmb->u.mb;
803 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
804
805 mb->un.varUnregLogin.rpi = rpi;
806 mb->un.varUnregLogin.rsvd1 = 0;
807 if (phba->sli_rev >= LPFC_SLI_REV3)
808 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
809
810 mb->mbxCommand = MBX_UNREG_LOGIN;
811 mb->mbxOwner = OWN_HOST;
812
813 return;
814}
815
816
817
818
819
820
821
822
/**
 * lpfc_sli4_unreg_all_rpis - Issue an UNREG_LOGIN covering all rpis on a vport
 * @vport: the vport whose remote port registrations are torn down.
 *
 * Allocates a mailbox, builds an UNREG_LOGIN for the vport and issues it
 * without waiting.  rsvd1 is set to 0x4000 after the command is built;
 * NOTE(review): this appears to flag an "unreg all rpis on this vpi"
 * variant of UNREG_LOGIN — confirm against the SLI-4 mailbox definition.
 * On issue failure the mailbox is returned to the pool; allocation
 * failure is silently ignored (best-effort teardown).
 */
void
lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		/* Build the UNREG_LOGIN using the translated vpi as the
		 * rpi argument, then mark the all-rpi variant via rsvd1.
		 */
		lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
				 mbox);
		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
}
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
/**
 * lpfc_reg_vpi - Prepare a REG_VPI mailbox command
 * @vport: the vport to register.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_REG_VPI request from the vport's identity.  On SLI-4, if
 * the vport does not need a (re)registration (FC_VPORT_NEEDS_REG_VPI
 * clear) the request is marked as an update (upd) of an existing vpi.
 * The port WWN is copied in and the first two words are converted to
 * little-endian in place.
 */
void
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_hba *phba = vport->phba;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* SLI-4 only: re-registration of an already-registered vpi is an
	 * update rather than a fresh registration.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
		mb->un.varRegVpi.upd = 1;

	mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
	mb->un.varRegVpi.sid = vport->fc_myDID;
	/* vfi translation differs between SLI-4 and earlier revisions */
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
	else
		mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
	memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
	       sizeof(struct lpfc_name));
	/* In-place endian conversion of the copied WWN words */
	mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
	mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);

	mb->mbxCommand = MBX_REG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;

}
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913void
914lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
915{
916 MAILBOX_t *mb = &pmb->u.mb;
917 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
918
919 if (phba->sli_rev == LPFC_SLI_REV3)
920 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
921 else if (phba->sli_rev >= LPFC_SLI_REV4)
922 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
923
924 mb->mbxCommand = MBX_UNREG_VPI;
925 mb->mbxOwner = OWN_HOST;
926 return;
927
928}
929
930
931
932
933
934
935
936
/**
 * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
 * @phba: pointer to lpfc hba data structure.
 *
 * Populates the ring descriptors in phba->pcb: for each SLI ring with a
 * non-zero number of command and response IOCBs, carves its command and
 * response entries out of the shared IOCB array in SLIM2 and records the
 * corresponding DMA addresses (computed as offsets from slim2p.virt added
 * to slim2p.phys).  Rings with no entries get zeroed descriptors.
 */
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	PCB_t *pcbp = phba->pcb;
	dma_addr_t pdma_addr;
	uint32_t offset;
	uint32_t iocbCnt = 0;	/* running index into phba->IOCBs[] */
	int i;

	pcbp->maxRing = (psli->num_rings - 1);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* IOCB entry size depends on the SLI revision */
		pring->sli.sli3.sizeCiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
		pring->sli.sli3.sizeRiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;

		/* A ring with no command or no response entries is unused */
		if ((pring->sli.sli3.numCiocb == 0) ||
			(pring->sli.sli3.numRiocb == 0)) {
			pcbp->rdsc[i].cmdEntries = 0;
			pcbp->rdsc[i].rspEntries = 0;
			pcbp->rdsc[i].cmdAddrHigh = 0;
			pcbp->rdsc[i].rspAddrHigh = 0;
			pcbp->rdsc[i].cmdAddrLow = 0;
			pcbp->rdsc[i].rspAddrLow = 0;
			pring->sli.sli3.cmdringaddr = NULL;
			pring->sli.sli3.rspringaddr = NULL;
			continue;
		}

		/* Command ring: next slice of the shared IOCB array */
		pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
		pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;

		/* DMA address = SLIM2 physical base + offset of the slice */
		offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
			 (uint8_t *) phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numCiocb;

		/* Response ring: the slice immediately after the commands */
		pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];

		pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
			 (uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numRiocb;
	}
}
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011void
1012lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1013{
1014 MAILBOX_t *mb = &pmb->u.mb;
1015 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1016 mb->un.varRdRev.cv = 1;
1017 mb->un.varRdRev.v3req = 1;
1018 mb->mbxCommand = MBX_READ_REV;
1019 mb->mbxOwner = OWN_HOST;
1020 return;
1021}
1022
/**
 * lpfc_sli4_swap_str - Post-process string fields of an SLI-4 mailbox
 * @phba: pointer to lpfc hba data structure.
 * @pmb: the completed mailbox to fix up.
 *
 * For a READ_REV completion, runs lpfc_sli_pcimem_bcopy over the two
 * 16-byte firmware name fields with source == destination.
 * NOTE(review): this appears to perform an in-place word/byte swap so
 * the names read as host-order strings — confirm lpfc_sli_pcimem_bcopy
 * semantics.  All other commands are left untouched.
 */
void
lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_mqe *mqe;

	switch (mb->mbxCommand) {
	case  MBX_READ_REV:
		 mqe = &pmb->u.mqe;
		/* In-place fix-up of both firmware name strings */
		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
				 mqe->un.read_rev.fw_name, 16);
		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
				 mqe->un.read_rev.ulp_fw_name, 16);
		break;
	default:
		break;
	}
	return;
}
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053static void
1054lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
1055 struct lpfc_hbq_init *hbq_desc)
1056{
1057 hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
1058 hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
1059 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
1060}
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072static void
1073lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
1074 struct lpfc_hbq_init *hbq_desc)
1075{
1076 hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
1077 hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
1078 hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
1079 hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
1080 memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
1081 sizeof(hbqmb->profiles.profile3.cmdmatch));
1082}
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095static void
1096lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
1097 struct lpfc_hbq_init *hbq_desc)
1098{
1099 hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
1100 hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
1101 hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
1102 hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
1103 memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
1104 sizeof(hbqmb->profiles.profile5.cmdmatch));
1105}
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
/**
 * lpfc_config_hbq - Prepare a CONFIG_HBQ mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @id: HBQ identifier.
 * @hbq_desc: driver HBQ init descriptor supplying the configuration.
 * @hbq_entry_index: index of this HBQ's first entry in the shared
 *	hbqslimp DMA area (used to compute the HBQ's DMA address).
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_CONFIG_HBQ request from the descriptor, fills in the
 * profile-specific section for profiles 2, 3 and 5, and copies up to
 * mask_count ring mask entries.
 */
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
		 struct lpfc_hbq_init *hbq_desc,
		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	hbqmb->hbqId = id;
	hbqmb->entry_count = hbq_desc->entry_count;
	hbqmb->recvNotify = hbq_desc->rn;
	/* Number of ring mask entries copied in the loop below */
	hbqmb->numMask    = hbq_desc->mask_count;
	/* Selects which profile section (if any) is filled in below */
	hbqmb->profile    = hbq_desc->profile;
	hbqmb->ringMask   = hbq_desc->ring_mask;
	hbqmb->headerLen  = hbq_desc->headerLen;
	hbqmb->logEntry   = hbq_desc->logEntry;

	/* DMA address of this HBQ's entries within the shared hbqslimp
	 * area: base + entry index scaled by the entry size.
	 */
	hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
		hbq_entry_index * sizeof(struct lpfc_hbq_entry);
	hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);

	mb->mbxCommand = MBX_CONFIG_HBQ;
	mb->mbxOwner = OWN_HOST;

	/* Fill in the profile-specific fields for the supported profiles */
	if (hbq_desc->profile == 2)
		lpfc_build_hbq_profile2(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 3)
		lpfc_build_hbq_profile3(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 5)
		lpfc_build_hbq_profile5(hbqmb, hbq_desc);

	/* Nothing more to do if there are no ring masks */
	if (!hbq_desc->mask_count)
		return;

	/* Copy the ring mask entries */
	for (i = 0; i < hbq_desc->mask_count; i++) {
		hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
		hbqmb->hbqMasks[i].tmask  = hbq_desc->hbqMasks[i].tmask;
		hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
		hbqmb->hbqMasks[i].rctlmask  = hbq_desc->hbqMasks[i].rctlmask;
	}

	return;
}
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
/**
 * lpfc_config_ring - Prepare a CONFIG_RING mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @ring: number of the SLI ring to configure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds a MBX_CONFIG_RING request for the given ring.  If the ring's
 * first protocol entry carries a profile, only the profile is sent;
 * otherwise the per-mask rctl/type match registers are filled in from
 * the ring's protocol table.
 */
void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varCfgRing.ring = ring;
	mb->un.varCfgRing.maxOrigXchg = 0;
	mb->un.varCfgRing.maxRespXchg = 0;
	mb->un.varCfgRing.recvNotify = 1;

	psli = &phba->sli;
	pring = &psli->ring[ring];
	mb->un.varCfgRing.numMask = pring->num_mask;
	mb->mbxCommand = MBX_CONFIG_RING;
	mb->mbxOwner = OWN_HOST;

	/* A profile on the first entry overrides the per-mask registers */
	if (pring->prt[0].profile) {
		mb->un.varCfgRing.profile = pring->prt[0].profile;
		return;
	}

	/* Otherwise configure one rctl/type match register per mask.
	 * ELS requests (FC_RCTL_ELS_REQ) use a 0xfe rctl mask so both
	 * the request and reply rctl values match the same entry.
	 */
	for (i = 0; i < pring->num_mask; i++) {
		mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
		if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
			mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
		else
			mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
		mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
		mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
	}

	return;
}
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
/**
 * lpfc_config_port - Prepare a CONFIG_PORT mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds the MBX_CONFIG_PORT request and, as a side effect, initializes
 * the Port Control Block (phba->pcb) that it points at: SLI mode and
 * feature bits, mailbox / host-group-pointer / port-group-pointer DMA
 * addresses, and the per-ring IOCB descriptors (via
 * lpfc_config_pcb_setup).  If SLI-3 features are not available the HBA
 * is downgraded to SLI-2 (phba->sli_rev is modified).  The PCB is
 * byte-swapped in place at the end so the HBA reads it correctly.
 */
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
	MAILBOX_t *mb = &pmb->u.mb;
	dma_addr_t pdma_addr;
	uint32_t bar_low, bar_high;
	size_t offset;
	struct lpfc_hgp hgp;
	int i;
	uint32_t pgp_offset;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;

	mb->un.varCfgPort.pcbLen = sizeof(PCB_t);

	/* DMA address of the PCB = SLIM2 physical base + PCB offset */
	offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
	mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);

	/* Always Host Group Pointer in SLIM */
	mb->un.varCfgPort.hps = 1;

	/* SLI-3 feature bits, only if the VPD says the HBA supports the
	 * enhanced receive buffer management (cerbm) feature.
	 */
	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
		if (phba->cfg_enable_bg)
			mb->un.varCfgPort.cbg = 1; /* BlockGuard */
		if (phba->cfg_enable_dss)
			mb->un.varCfgPort.cdss = 1;
		mb->un.varCfgPort.cerbm = 1;
		mb->un.varCfgPort.ccrp = 1;
		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
		if (phba->max_vpi && phba->cfg_enable_npiv &&
		    phba->vpd.sli3Feat.cmv) {
			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
			mb->un.varCfgPort.cmv = 1; /* multiple vports */
		} else
			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
	} else
		/* No SLI-3 features available: fall back to SLI-2 */
		phba->sli_rev = LPFC_SLI_REV2;
	mb->un.varCfgPort.sli_mode = phba->sli_rev;

	/* If this is an SLI-3 port, configure async status notification */
	if (phba->sli_rev == LPFC_SLI_REV3)
		mb->un.varCfgPort.casabt = 1;

	/* Now setup the PCB itself, starting with type and feature */
	phba->pcb->type = TYPE_NATIVE_SLI2;
	phba->pcb->feature = FEATURE_INITIAL_SLI2;

	/* Mailbox DMA address within SLIM2 */
	phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
	offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);

	/* The host group pointers (hgp) live either in host memory or in
	 * SLIM; their address is derived below from the PCI BARs read
	 * from config space.
	 */
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);

	if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
		/* Host memory HGP: point into the SLIM2 mailbox area */
		phba->host_gp = &phba->mbox->us.s2.host[0];
		phba->hbq_put = NULL;
		offset = (uint8_t *)&phba->mbox->us.s2.host -
			(uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
		phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
	} else {
		/* Always Host Group Pointer in SLIM */
		mb->un.varCfgPort.hps = 1;

		if (phba->sli_rev == 3) {
			phba->host_gp = &mb_slim->us.s3.host[0];
			phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
		} else {
			phba->host_gp = &mb_slim->us.s2.host[0];
			phba->hbq_put = NULL;
		}

		/* HGP bus address = BAR base + offset of host_gp in SLIM */
		phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
			(void __iomem *)phba->host_gp -
			(void __iomem *)phba->MBslimaddr;
		if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
			phba->pcb->hgpAddrHigh = bar_high;
		else
			phba->pcb->hgpAddrHigh = 0;

		/* Zero out the host group pointer area in SLIM */
		memset(&hgp, 0, sizeof(struct lpfc_hgp));

		for (i = 0; i < phba->sli.num_rings; i++) {
			lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
				    sizeof(*phba->host_gp));
		}
	}

	/* Port group pointer: location depends on SLI revision */
	if (phba->sli_rev == 3)
		pgp_offset = offsetof(struct lpfc_sli2_slim,
				      mbx.us.s3_pgp.port);
	else
		pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
	pdma_addr = phba->slim2p.phys + pgp_offset;
	phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);

	/* Use callback routine to setup rings in the pcb */
	lpfc_config_pcb_setup(phba);

	/* special handling for LC HBAs */
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		uint32_t hbainit[5];

		lpfc_hba_init(phba, hbainit);

		memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
	}

	/* Swap PCB if needed (in-place byte swap for the HBA) */
	lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442void
1443lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1444{
1445 MAILBOX_t *mb = &pmb->u.mb;
1446
1447 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1448 mb->mbxCommand = MBX_KILL_BOARD;
1449 mb->mbxOwner = OWN_HOST;
1450 return;
1451}
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463void
1464lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
1465{
1466 struct lpfc_sli *psli;
1467
1468 psli = &phba->sli;
1469
1470 list_add_tail(&mbq->list, &psli->mboxq);
1471
1472 psli->mboxq_cnt++;
1473
1474 return;
1475}
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491LPFC_MBOXQ_t *
1492lpfc_mbox_get(struct lpfc_hba * phba)
1493{
1494 LPFC_MBOXQ_t *mbq = NULL;
1495 struct lpfc_sli *psli = &phba->sli;
1496
1497 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
1498 if (mbq)
1499 psli->mboxq_cnt--;
1500
1501 return mbq;
1502}
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
/**
 * __lpfc_mbox_cmpl_put - Put a completed mailbox onto the completion queue
 * @phba: pointer to lpfc hba data structure.
 * @mbq: the completed mailbox queue element.
 *
 * Lock-free variant; lpfc_mbox_cmpl_put() wraps this under hbalock, so
 * callers of this function are expected to hold hbalock themselves.
 */
void
__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
}
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530void
1531lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1532{
1533 unsigned long iflag;
1534
1535
1536 spin_lock_irqsave(&phba->hbalock, iflag);
1537 __lpfc_mbox_cmpl_put(phba, mbq);
1538 spin_unlock_irqrestore(&phba->hbalock, iflag);
1539 return;
1540}
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553int
1554lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1555{
1556
1557
1558
1559 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1560 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1561 if (!mboxq->vport) {
1562 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1563 "1814 Mbox x%x failed, no vport\n",
1564 mboxq->u.mb.mbxCommand);
1565 dump_stack();
1566 return -ENODEV;
1567 }
1568 }
1569 return 0;
1570}
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582int
1583lpfc_mbox_dev_check(struct lpfc_hba *phba)
1584{
1585
1586 if (unlikely(pci_channel_offline(phba->pcidev)))
1587 return -ENODEV;
1588
1589
1590 if (phba->link_state == LPFC_HBA_ERROR)
1591 return -ENODEV;
1592
1593 return 0;
1594}
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
/**
 * lpfc_mbox_tmo_val - Select the timeout value for a mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: the mailbox command whose timeout is needed.
 *
 * Flash-access style commands get the long LPFC_MBOX_TMO_FLASH_CMD
 * timeout.  SLI4_CONFIG commands are further classified by subsystem
 * and opcode: certain COMMON and FCOE opcodes get the extended
 * SLI4_CONFIG timeout, everything else the normal SLI4_CONFIG timeout.
 * All remaining commands use the default LPFC_MBOX_TMO.
 *
 * Return: the timeout value to use for @mboxq.
 */
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	MAILBOX_t *mbox = &mboxq->u.mb;
	uint8_t subsys, opcode;

	switch (mbox->mbxCommand) {
	/* Commands that touch flash/NVRAM need the long timeout */
	case MBX_WRITE_NV:	/* 0x03 */
	case MBX_DUMP_MEMORY:	/* 0x17 */
	case MBX_UPDATE_CFG:	/* 0x1B */
	case MBX_DOWN_LOAD:	/* 0x1C */
	case MBX_DEL_LD_ENTRY:	/* 0x1D */
	case MBX_WRITE_VPARMS:	/* 0x32 */
	case MBX_LOAD_AREA:	/* 0x81 */
	case MBX_WRITE_WWN:     /* 0x98 */
	case MBX_LOAD_EXP_ROM:	/* 0x9C */
	case MBX_ACCESS_VDATA:	/* 0xA5 */
		return LPFC_MBOX_TMO_FLASH_CMD;
	case MBX_SLI4_CONFIG:	/* 0x9b */
		/* Classify by subsystem/opcode embedded in the command */
		subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
		opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
		if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
			switch (opcode) {
			case LPFC_MBOX_OPCODE_READ_OBJECT:
			case LPFC_MBOX_OPCODE_WRITE_OBJECT:
			case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
			case LPFC_MBOX_OPCODE_DELETE_OBJECT:
			case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
			case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
			case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
			case LPFC_MBOX_OPCODE_RESET_LICENSES:
			case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
			case LPFC_MBOX_OPCODE_GET_VPD_DATA:
			case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
			switch (opcode) {
			case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		return LPFC_MBOX_SLI4_CONFIG_TMO;
	}
	return LPFC_MBOX_TMO;
}
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669void
1670lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1671 dma_addr_t phyaddr, uint32_t length)
1672{
1673 struct lpfc_mbx_nembed_cmd *nembed_sge;
1674
1675 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1676 &mbox->u.mqe.un.nembed_cmd;
1677 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1678 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1679 nembed_sge->sge[sgentry].length = length;
1680}
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690void
1691lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1692 struct lpfc_mbx_sge *sge)
1693{
1694 struct lpfc_mbx_nembed_cmd *nembed_sge;
1695
1696 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1697 &mbox->u.mqe.un.nembed_cmd;
1698 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1699 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1700 sge->length = nembed_sge->sge[sgentry].length;
1701}
1702
1703
1704
1705
1706
1707
1708
1709
/**
 * lpfc_sli4_mbox_cmd_free - Release an SLI4 mailbox command and its DMA pages
 * @phba: pointer to the HBA context object.
 * @mbox: mailbox queue entry to free.
 *
 * For an embedded command only the mailbox itself is returned to the
 * mempool.  For a non-embedded command, every external DMA page described
 * by the SGE table is freed first, then the virtual-address array, then
 * the mailbox.
 */
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_sli4_config *sli4_cfg;
	struct lpfc_mbx_sge sge;
	dma_addr_t phyaddr;
	uint32_t sgecount, sgentry;

	sli4_cfg = &mbox->u.mqe.un.sli4_config;

	/* Embedded command: no external pages to release */
	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	/* Number of external pages recorded in the config header */
	sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);

	/* Defensive: non-embedded command without an SGE virt array */
	if (unlikely(!mbox->sge_array)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	/* Free each DMA page using the address stored in its SGE */
	for (sgentry = 0; sgentry < sgecount; sgentry++) {
		lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
		phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  mbox->sge_array->addr[sgentry], phyaddr);
	}

	/* Release the virtual-address bookkeeping array */
	kfree(mbox->sge_array);

	/* Finally return the mailbox itself to the pool */
	mempool_free(mbox, phba->mbox_mem_pool);
}
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
/**
 * lpfc_sli4_config - Initialize an SLI4_CONFIG mailbox command
 * @phba: pointer to the HBA context object.
 * @mbox: mailbox queue entry to build the command in.
 * @subsystem: SLI4 config subsystem the opcode belongs to.
 * @opcode: SLI4 config opcode to issue.
 * @length: total payload length required, in bytes.
 * @emb: true to embed the payload in the mailbox, false for external pages.
 *
 * For the embedded form the header is written directly into the MQE and
 * @length is returned.  For the non-embedded form, up to
 * LPFC_SLI4_MBX_SGE_MAX_PAGES DMA-coherent pages are allocated and chained
 * via the SGE table; the opcode/subsystem header is placed in the first
 * page and byte-swapped for the hardware.
 *
 * Return: the number of payload bytes actually set up (0 on allocation
 * failure of the SGE array; may be less than @length if a page
 * allocation fails mid-loop).
 */
int
lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
{
	struct lpfc_mbx_sli4_config *sli4_config;
	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
	uint32_t alloc_len;
	uint32_t resid_len;
	uint32_t pagen, pcount;
	void *viraddr;
	dma_addr_t phyaddr;

	/* Start from a clean mailbox and select SLI4_CONFIG */
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);

	sli4_config = &mbox->u.mqe.un.sli4_config;

	/* Embedded payload: header and request live inside the MQE */
	if (emb) {
		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
		sli4_config->header.cfg_mhdr.payload_length = length;

		bf_set(lpfc_mbox_hdr_opcode,
		       &sli4_config->header.cfg_shdr.request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem,
		       &sli4_config->header.cfg_shdr.request, subsystem);
		/* Request length excludes the command header itself */
		sli4_config->header.cfg_shdr.request.request_length =
			length - LPFC_MBX_CMD_HDR_LENGTH;
		return length;
	}

	/* Non-embedded: compute page count, capped at the SGE table size */
	pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;

	mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
				  GFP_KERNEL);
	if (!mbox->sge_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2527 Failed to allocate non-embedded SGE "
				"array.\n");
		return 0;
	}
	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
		/*
		 * Allocate one DMA page per SGE and record its virtual
		 * address; stop early (short setup) if allocation fails.
		 */
		viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
					     &phyaddr, GFP_KERNEL);

		if (!viraddr)
			break;
		memset(viraddr, 0, SLI4_PAGE_SIZE);
		mbox->sge_array->addr[pagen] = viraddr;

		/* The config header occupies the start of the first page */
		if (pagen == 0)
			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
		resid_len = length - alloc_len;
		if (resid_len > SLI4_PAGE_SIZE) {
			/* Full page consumed by this SGE */
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      SLI4_PAGE_SIZE);
			alloc_len += SLI4_PAGE_SIZE;
		} else {
			/* Last (partial) page covers the remainder */
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      resid_len);
			alloc_len = length;
		}
	}

	/* Record what was actually set up in the config header */
	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);

	/* Stamp opcode/subsystem into the first-page header, if present */
	if (pagen > 0) {
		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
		cfg_shdr->request.request_length =
				alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
	}

	/* Byte-swap the header in place for the hardware's endianness */
	if (cfg_shdr)
		lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
				      sizeof(union lpfc_sli4_cfg_shdr));
	return alloc_len;
}
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
/**
 * lpfc_sli4_mbox_rsrc_extent - Finish building a resource-extent mailbox cmd
 * @phba: pointer to the HBA context object.
 * @mbox: SLI4_CONFIG mailbox command already set up by lpfc_sli4_config().
 * @exts_count: number of extents requested (ALLOC opcode only).
 * @rsrc_type: resource extent type to operate on.
 * @emb: LPFC_SLI4_MBX_EMBED or LPFC_SLI4_MBX_NEMBED.
 *
 * Writes the resource type (and, for the ALLOC opcode, the extent count)
 * into either the embedded request or the first external page.  For the
 * non-embedded form, word4 is byte-swapped in place for the hardware.
 *
 * Return: 0 on success, 1 on a missing external page or an unsupported
 * opcode.
 */
int
lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
			   uint16_t exts_count, uint16_t rsrc_type, bool emb)
{
	uint8_t opcode = 0;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
	void *virtaddr = NULL;

	/*
	 * Non-embedded: the request lives in the first external page.
	 * NOTE(review): mbox->sge_array is dereferenced without a NULL
	 * check here — presumably callers only pass mailboxes for which
	 * lpfc_sli4_config() succeeded; confirm at the call sites.
	 */
	if (emb == LPFC_SLI4_MBX_NEMBED) {
		virtaddr = mbox->sge_array->addr[0];
		if (virtaddr == NULL)
			return 1;
		n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
	}

	/* Set the resource type in the embedded or external request */
	if (emb == LPFC_SLI4_MBX_EMBED)
		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
		       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
		       rsrc_type);
	else {
		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
		       n_rsrc_extnt, rsrc_type);
		/* Swap the freshly-written word for the hardware in place */
		lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
				      &n_rsrc_extnt->word4,
				      sizeof(uint32_t));
	}

	/* Only ALLOC needs the extent count; GET/DEALLOC variants do not */
	opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
	switch (opcode) {
	case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
		if (emb == LPFC_SLI4_MBX_EMBED)
			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
			       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
			       exts_count);
		else
			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
			       n_rsrc_extnt, exts_count);
		break;
	case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
	case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
	case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
		/* Nothing further to fill in for these opcodes */
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2929 Resource Extent Opcode x%x is "
				"unsupported\n", opcode);
		return 1;
	}

	return 0;
}
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939uint8_t
1940lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1941{
1942 struct lpfc_mbx_sli4_config *sli4_cfg;
1943 union lpfc_sli4_cfg_shdr *cfg_shdr;
1944
1945 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1946 return LPFC_MBOX_SUBSYSTEM_NA;
1947 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1948
1949
1950 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1951 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1952 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1953 }
1954
1955
1956 if (unlikely(!mbox->sge_array))
1957 return LPFC_MBOX_SUBSYSTEM_NA;
1958 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1959 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1960}
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972uint8_t
1973lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1974{
1975 struct lpfc_mbx_sli4_config *sli4_cfg;
1976 union lpfc_sli4_cfg_shdr *cfg_shdr;
1977
1978 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1979 return LPFC_MBOX_OPCODE_NA;
1980 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1981
1982
1983 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1984 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1985 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1986 }
1987
1988
1989 if (unlikely(!mbox->sge_array))
1990 return LPFC_MBOX_OPCODE_NA;
1991 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1992 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1993}
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
/**
 * lpfc_sli4_mbx_read_fcf_rec - Build a READ_FCF_TABLE non-embedded mailbox cmd
 * @phba: pointer to the HBA context object.
 * @mboxq: mailbox queue entry to build the command in.
 * @fcf_index: index of the FCF table record to read.
 *
 * Sets up a non-embedded FCOE READ_FCF_TABLE command large enough to hold
 * one fcf_record plus the config header, stamps the requested index into
 * the request, and byte-swaps the index word for the hardware.
 *
 * Return: 0 on success, -ENOMEM if @mboxq is NULL or the DMA payload
 * could not be fully allocated.
 */
int
lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
			   struct lpfcMboxq *mboxq,
			   uint16_t fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	uint8_t *bytep;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;

	if (!mboxq)
		return -ENOMEM;

	/* One FCF record plus config header plus two trailing words */
	req_len = sizeof(struct fcf_record) +
		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);

	/* Set up the non-embedded SLI4_CONFIG command and its DMA pages */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
			LPFC_SLI4_MBX_NEMBED);

	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"0291 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}

	/* The request payload lives in the first external page */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	virt_addr = mboxq->sge_array->addr[0];
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;

	/* Record which FCF table entry to read */
	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);

	/* Byte-swap the word just past the header (the index) in place */
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));

	return 0;
}
2053
2054
2055
2056
2057
2058
2059
2060
2061void
2062lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
2063{
2064
2065 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
2066 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
2067
2068
2069 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
2070 bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
2071
2072
2073 if (phba->cfg_enable_bg)
2074 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
2075
2076
2077 if (phba->max_vpi && phba->cfg_enable_npiv)
2078 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
2079
2080 return;
2081}
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094void
2095lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2096{
2097 struct lpfc_mbx_init_vfi *init_vfi;
2098
2099 memset(mbox, 0, sizeof(*mbox));
2100 mbox->vport = vport;
2101 init_vfi = &mbox->u.mqe.un.init_vfi;
2102 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
2103 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
2104 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
2105 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
2106 bf_set(lpfc_init_vfi_vfi, init_vfi,
2107 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2108 bf_set(lpfc_init_vfi_vpi, init_vfi,
2109 vport->phba->vpi_ids[vport->vpi]);
2110 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2111 vport->phba->fcf.fcfi);
2112}
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
/**
 * lpfc_reg_vfi - Build a REG_VFI mailbox command for a vport
 * @mbox: mailbox queue entry to build the command in.
 * @vport: vport being registered with the fabric.
 * @phys: DMA address of the service-parameters buffer for the BDE.
 *
 * Registers the vport's physical VFI/FCFI/VPI identifiers, its port
 * WWN (converted to little-endian words), the E_D_TOV/R_A_TOV timer
 * values, the N_Port ID, and a 64-bit BDE pointing at the service
 * parameters.
 */
void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
	struct lpfc_mbx_reg_vfi *reg_vfi;

	memset(mbox, 0, sizeof(*mbox));
	reg_vfi = &mbox->u.mqe.un.reg_vfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
	/* Translate driver indices to physical identifiers */
	bf_set(lpfc_reg_vfi_vfi, reg_vfi,
	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
	bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
	/* Copy the port WWN and swap each word to little-endian */
	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
	/* Fabric timer values negotiated for this link */
	reg_vfi->e_d_tov = vport->phba->fc_edtov;
	reg_vfi->r_a_tov = vport->phba->fc_ratov;
	/* 64-bit BDE describing the service-parameters buffer */
	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
	reg_vfi->bde.addrLow = putPaddrLow(phys);
	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
			"3134 Register VFI, mydid:x%x, fcfi:%d, "
			" vfi:%d, vpi:%d, fc_pname:%x%x\n",
			vport->fc_myDID,
			vport->phba->fcf.fcfi,
			vport->phba->sli4_hba.vfi_ids[vport->vfi],
			vport->phba->vpi_ids[vport->vpi],
			reg_vfi->wwn[0], reg_vfi->wwn[1]);
}
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170void
2171lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
2172{
2173 memset(mbox, 0, sizeof(*mbox));
2174 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
2175 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2176 phba->vpi_ids[vpi]);
2177 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2178 phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2179}
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192void
2193lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2194{
2195 memset(mbox, 0, sizeof(*mbox));
2196 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2197 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2198 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2199}
2200
2201
2202
2203
2204
2205
2206
2207
2208
/**
 * lpfc_sli4_dump_cfg_rg23 - Build a DUMP_MEMORY command for config region 23
 * @phba: pointer to the HBA context object.
 * @mbox: mailbox queue entry to build the command in.
 *
 * Allocates a DMA buffer for the region-23 dump, attaches it via
 * mbox->context1 (caller owns freeing it after completion), and fills
 * in the DUMP_MEMORY request with the buffer's DMA address.
 *
 * Return: 0 on success, 1 if buffer allocation fails.
 */
int
lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_dmabuf *mp = NULL;
	MAILBOX_t *mb;

	memset(mbox, 0, sizeof(*mbox));
	mb = &mbox->u.mb;

	/* Allocate the descriptor and its DMA buffer */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	if (!mp || !mp->virt) {
		/* kfree(NULL) is a no-op, so this covers both failures */
		kfree(mp);
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
			"2569 lpfc dump config region 23: memory"
			" allocation failed\n");
		return 1;
	}

	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* Save the buffer so the completion handler can find/free it */
	mbox->context1 = (uint8_t *) mp;

	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.region_id = DMP_REGION_23;
	mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
	/* DMA address of the dump buffer, split into low/high words */
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	return 0;
}
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
/**
 * lpfc_reg_fcfi - Build a REG_FCFI mailbox command
 * @phba: pointer to the HBA context object.
 * @mbox: mailbox queue entry to build the command in.
 *
 * Registers the current FCF record with the adapter: binds the header
 * receive queue (the remaining three RQ slots are marked invalid), sets
 * the FCF table index and the MAC address mode, and, when the FCF record
 * carries a valid VLAN, enables VLAN tagging with that id.
 */
void
lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_reg_fcfi *reg_fcfi;

	memset(mbox, 0, sizeof(*mbox));
	reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
	/* Only RQ 0 (the header RQ) is used; the rest are invalid */
	bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
	bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
	       phba->fcf.current_rec.fcf_indx);

	/*
	 * MAC address mode: the driver's addr_mode is inverted and masked
	 * to two bits for the command's encoding.
	 */
	bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
	if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
		/* FCF record has a valid VLAN: enable tagging with its id */
		bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
		bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
		       phba->fcf.current_rec.vlan_id);
	}
}
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290void
2291lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2292{
2293 memset(mbox, 0, sizeof(*mbox));
2294 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
2295 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
2296}
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306void
2307lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2308{
2309 struct lpfc_hba *phba = ndlp->phba;
2310 struct lpfc_mbx_resume_rpi *resume_rpi;
2311
2312 memset(mbox, 0, sizeof(*mbox));
2313 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2314 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2315 bf_set(lpfc_resume_rpi_index, resume_rpi,
2316 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2317 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2318 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2319}
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329void
2330lpfc_supported_pages(struct lpfcMboxq *mbox)
2331{
2332 struct lpfc_mbx_supp_pages *supp_pages;
2333
2334 memset(mbox, 0, sizeof(*mbox));
2335 supp_pages = &mbox->u.mqe.un.supp_pages;
2336 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2337 bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
2338}
2339
2340
2341
2342
2343
2344
2345
2346
2347void
2348lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
2349{
2350 struct lpfc_mbx_pc_sli4_params *sli4_params;
2351
2352 memset(mbox, 0, sizeof(*mbox));
2353 sli4_params = &mbox->u.mqe.un.sli4_params;
2354 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2355 bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
2356}
2357