1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_transport_fc.h>
29#include <scsi/scsi.h>
30#include <scsi/fc/fc_fs.h>
31
32#include "lpfc_hw4.h"
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_sli4.h"
36#include "lpfc_nl.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_compat.h"
43
44
45
46
47
48
49
50
51
52
53
54
55
/**
 * lpfc_dump_static_vport - Prepare a DUMP mailbox for static vport info
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @offset: offset into the static vport region to start the dump at.
 *
 * Builds @pmb as an MBX_DUMP_MEMORY command against the static vport
 * configuration region.  On pre-SLI-4 HBAs the response is returned in the
 * mailbox itself; on SLI-4 a separate DMA buffer is allocated and attached
 * to pmb->context1 (presumably freed by the completion handler - confirm).
 *
 * Return: 0 on success, 1 if the SLI-4 DMA buffer allocation failed.
 */
int
lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
		uint16_t offset)
{
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;

	mb = &pmb->u.mb;

	/* Set up common fields of the DUMP command. */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = DMP_REGION_VPORT;
	mb->mbxOwner = OWN_HOST;

	/* Pre-SLI-4: data comes back in the mailbox; no DMA buffer needed. */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		mb->un.varDmp.cv = 1;
		mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
		return 0;
	}

	/* SLI-4: allocate a DMA-able buffer for the dump response. */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	if (!mp || !mp->virt) {
		kfree(mp);	/* kfree(NULL) is a no-op */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"2605 lpfc_dump_static_vport: memory"
			" allocation failed\n");
		return 1;
	}
	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* Hand the buffer to the completion path via context1. */
	pmb->context1 = (uint8_t *)mp;
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);

	return 0;
}
102
103
104
105
106
107
108
109
110void
111lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
112{
113 MAILBOX_t *mb;
114 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
115 mb = &pmb->u.mb;
116 mb->mbxCommand = MBX_DOWN_LINK;
117 mb->mbxOwner = OWN_HOST;
118}
119
120
121
122
123
124
125
126
127
128
129
130
131
132void
133lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
134 uint16_t region_id)
135{
136 MAILBOX_t *mb;
137 void *ctx;
138
139 mb = &pmb->u.mb;
140 ctx = pmb->context2;
141
142
143 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
144 mb->mbxCommand = MBX_DUMP_MEMORY;
145 mb->un.varDmp.cv = 1;
146 mb->un.varDmp.type = DMP_NV_PARAMS;
147 mb->un.varDmp.entry_index = offset;
148 mb->un.varDmp.region_id = region_id;
149 mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
150 mb->un.varDmp.co = 0;
151 mb->un.varDmp.resp_offset = 0;
152 pmb->context2 = ctx;
153 mb->mbxOwner = OWN_HOST;
154 return;
155}
156
157
158
159
160
161
162
163
164
165void
166lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
167{
168 MAILBOX_t *mb;
169 void *ctx;
170
171 mb = &pmb->u.mb;
172
173 ctx = pmb->context2;
174
175
176 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
177 mb->mbxCommand = MBX_DUMP_MEMORY;
178 mb->mbxOwner = OWN_HOST;
179 mb->un.varDmp.cv = 1;
180 mb->un.varDmp.type = DMP_NV_PARAMS;
181 if (phba->sli_rev < LPFC_SLI_REV4)
182 mb->un.varDmp.entry_index = 0;
183 mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
184 mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
185 mb->un.varDmp.co = 0;
186 mb->un.varDmp.resp_offset = 0;
187 pmb->context2 = ctx;
188 return;
189}
190
191
192
193
194
195
196
197
198
199
200
201
202void
203lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
204{
205 MAILBOX_t *mb;
206
207 mb = &pmb->u.mb;
208 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
209 mb->mbxCommand = MBX_READ_NV;
210 mb->mbxOwner = OWN_HOST;
211 return;
212}
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227void
228lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
229 uint32_t ring)
230{
231 MAILBOX_t *mb;
232
233 mb = &pmb->u.mb;
234 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
235 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
236 mb->un.varCfgAsyncEvent.ring = ring;
237 mb->mbxOwner = OWN_HOST;
238 return;
239}
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254void
255lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
256{
257 MAILBOX_t *mb;
258
259 mb = &pmb->u.mb;
260 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
261 mb->mbxCommand = MBX_HEARTBEAT;
262 mb->mbxOwner = OWN_HOST;
263 return;
264}
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287int
288lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
289 struct lpfc_dmabuf *mp)
290{
291 MAILBOX_t *mb;
292 struct lpfc_sli *psli;
293
294 psli = &phba->sli;
295 mb = &pmb->u.mb;
296 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
297
298 INIT_LIST_HEAD(&mp->list);
299 mb->mbxCommand = MBX_READ_TOPOLOGY;
300 mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
301 mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
302 mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
303
304
305
306
307 pmb->context1 = (uint8_t *)mp;
308 mb->mbxOwner = OWN_HOST;
309 return (0);
310}
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327void
328lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
329{
330 MAILBOX_t *mb;
331
332 mb = &pmb->u.mb;
333 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
334
335 mb->un.varClearLA.eventTag = phba->fc_eventTag;
336 mb->mbxCommand = MBX_CLEAR_LA;
337 mb->mbxOwner = OWN_HOST;
338 return;
339}
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355void
356lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
357{
358 struct lpfc_vport *vport = phba->pport;
359 MAILBOX_t *mb = &pmb->u.mb;
360 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
361
362
363
364
365 if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
366 mb->un.varCfgLnk.cr = 1;
367 mb->un.varCfgLnk.ci = 1;
368 mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
369 mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
370 }
371
372 mb->un.varCfgLnk.myId = vport->fc_myDID;
373 mb->un.varCfgLnk.edtov = phba->fc_edtov;
374 mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
375 mb->un.varCfgLnk.ratov = phba->fc_ratov;
376 mb->un.varCfgLnk.rttov = phba->fc_rttov;
377 mb->un.varCfgLnk.altov = phba->fc_altov;
378 mb->un.varCfgLnk.crtov = phba->fc_crtov;
379 mb->un.varCfgLnk.citov = phba->fc_citov;
380
381 if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
382 mb->un.varCfgLnk.ack0_enable = 1;
383
384 mb->mbxCommand = MBX_CONFIG_LINK;
385 mb->mbxOwner = OWN_HOST;
386 return;
387}
388
389
390
391
392
393
394
395
396
397
398
399
400
401
/**
 * lpfc_config_msi - Prepare a CONFIG_MSI mailbox command (SLI-3 MSI-X)
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Routes ring-0 and ring-1 attention conditions to MSI-X message 1,
 * leaving the remaining attention bits on message 0.  Only valid when
 * the driver is configured for MSI-X (cfg_use_msi == 2) on an SLI-3 or
 * later HBA.
 *
 * Return: 0 on success, -EINVAL if the MSI-X/SLI-3 prerequisites fail.
 */
int
lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t attentionConditions[2];

	/* Sanity check: driver must be configured for MSI-X. */
	if (phba->cfg_use_msi != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0475 Not configured for supporting MSI-X "
				"cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
		return -EINVAL;
	}

	/* Sanity check: multi-message MSI requires SLI-3 or later. */
	if (phba->sli_rev < 3) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0476 HBA not supporting SLI-3 or later "
				"SLI Revision: 0x%x\n", phba->sli_rev);
		return -EINVAL;
	}

	/* Clear the mailbox command fields. */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	/*
	 * Attention conditions that participate in multi-message
	 * interrupt routing: all ring attentions, error attention,
	 * link attention and mailbox attention.
	 */
	attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
				  HA_LATT | HA_MBATT);
	attentionConditions[1] = 0;

	mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
	mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];

	/*
	 * Associate HA bit positions with MSI-X message numbers.  The
	 * byte index must be flipped on little-endian hosts because the
	 * messageNumberByHA array is defined against big-endian bitfield
	 * layout.
	 */
#ifdef __BIG_ENDIAN_BITFIELD
	/* RA0: FCP ring -> message 1 */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
	/* RA1: extra ring -> message 1 */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
#else	/* little-endian bitfield layout: flip byte within the word */
	/* RA0: FCP ring -> message 1 */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
	/* RA1: extra ring -> message 1 */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
#endif
	/* Multi-message interrupt autoclear configuration. */
	mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
	mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];

	/* NOTE(review): the autoClearHA values set above are immediately
	 * overwritten with 0, effectively disabling HBA autoclear.  This
	 * looks deliberate (autoclear kept off while the setup code is
	 * retained) - confirm before removing either pair of stores.
	 */
	mb->un.varCfgMSI.autoClearHA[0] = 0;
	mb->un.varCfgMSI.autoClearHA[1] = 0;

	/* Set command and owner bit. */
	mb->mbxCommand = MBX_CONFIG_MSI;
	mb->mbxOwner = OWN_HOST;

	return 0;
}
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481void
482lpfc_init_link(struct lpfc_hba * phba,
483 LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
484{
485 lpfc_vpd_t *vpd;
486 struct lpfc_sli *psli;
487 MAILBOX_t *mb;
488
489 mb = &pmb->u.mb;
490 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
491
492 psli = &phba->sli;
493 switch (topology) {
494 case FLAGS_TOPOLOGY_MODE_LOOP_PT:
495 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
496 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
497 break;
498 case FLAGS_TOPOLOGY_MODE_PT_PT:
499 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
500 break;
501 case FLAGS_TOPOLOGY_MODE_LOOP:
502 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
503 break;
504 case FLAGS_TOPOLOGY_MODE_PT_LOOP:
505 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
506 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
507 break;
508 case FLAGS_LOCAL_LB:
509 mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
510 break;
511 }
512
513
514 mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
515
516
517
518
519 vpd = &phba->vpd;
520 if (vpd->rev.feaLevelHigh >= 0x02){
521 switch(linkspeed){
522 case LPFC_USER_LINK_SPEED_1G:
523 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
524 mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
525 break;
526 case LPFC_USER_LINK_SPEED_2G:
527 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
528 mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
529 break;
530 case LPFC_USER_LINK_SPEED_4G:
531 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
532 mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
533 break;
534 case LPFC_USER_LINK_SPEED_8G:
535 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
536 mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
537 break;
538 case LPFC_USER_LINK_SPEED_10G:
539 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
540 mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
541 break;
542 case LPFC_USER_LINK_SPEED_16G:
543 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
544 mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
545 break;
546 case LPFC_USER_LINK_SPEED_AUTO:
547 default:
548 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
549 break;
550 }
551
552 }
553 else
554 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
555
556 mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
557 mb->mbxOwner = OWN_HOST;
558 mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
559 return;
560}
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583int
584lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
585{
586 struct lpfc_dmabuf *mp;
587 MAILBOX_t *mb;
588 struct lpfc_sli *psli;
589
590 psli = &phba->sli;
591 mb = &pmb->u.mb;
592 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
593
594 mb->mbxOwner = OWN_HOST;
595
596
597
598 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
599 if (mp)
600 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
601 if (!mp || !mp->virt) {
602 kfree(mp);
603 mb->mbxCommand = MBX_READ_SPARM64;
604
605 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
606 "0301 READ_SPARAM: no buffers\n");
607 return (1);
608 }
609 INIT_LIST_HEAD(&mp->list);
610 mb->mbxCommand = MBX_READ_SPARM64;
611 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
612 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
613 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
614 if (phba->sli_rev >= LPFC_SLI_REV3)
615 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
616
617
618 pmb->context1 = mp;
619
620 return (0);
621}
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638void
639lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
640 LPFC_MBOXQ_t * pmb)
641{
642 MAILBOX_t *mb;
643
644 mb = &pmb->u.mb;
645 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
646
647 mb->un.varUnregDID.did = did;
648 mb->un.varUnregDID.vpi = vpi;
649 if ((vpi != 0xffff) &&
650 (phba->sli_rev == LPFC_SLI_REV4))
651 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
652
653 mb->mbxCommand = MBX_UNREG_D_ID;
654 mb->mbxOwner = OWN_HOST;
655 return;
656}
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671void
672lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
673{
674 MAILBOX_t *mb;
675
676 mb = &pmb->u.mb;
677 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
678
679 mb->mbxCommand = MBX_READ_CONFIG;
680 mb->mbxOwner = OWN_HOST;
681 return;
682}
683
684
685
686
687
688
689
690
691
692
693
694
695
696void
697lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
698{
699 MAILBOX_t *mb;
700
701 mb = &pmb->u.mb;
702 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
703
704 mb->mbxCommand = MBX_READ_LNK_STAT;
705 mb->mbxOwner = OWN_HOST;
706 return;
707}
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
/**
 * lpfc_reg_rpi - Prepare a REG_LOGIN64 mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port index the login belongs to (translated through
 *       vpi_ids on SLI-3 and later).
 * @did: FC destination ID of the remote port being registered.
 * @param: service parameters to register (copied; sized as serv_parm).
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @rpi: remote port index; only used on SLI-4, where it is translated
 *       through the sli4_hba.rpi_ids mapping.
 *
 * Copies @param into a freshly allocated DMA buffer, attaches the buffer
 * to pmb->context1 (presumably freed by the completion handler - confirm)
 * and fills @pmb as an MBX_REG_LOGIN64 command.
 *
 * Return: 0 on success, 1 if the DMA buffer allocation failed.
 */
int
lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint8_t *sparam;
	struct lpfc_dmabuf *mp;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	/* SLI-4 carries an absolute rpi; earlier revisions leave it 0. */
	mb->un.varRegLogin.rpi = 0;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
	mb->un.varRegLogin.did = did;
	mb->mbxOwner = OWN_HOST;
	/* Get a buffer to hold the remote port's service parameters. */
	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		kfree(mp);	/* kfree(NULL) is a no-op */
		mb->mbxCommand = MBX_REG_LOGIN64;
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
				"rpi x%x\n", vpi, did, rpi);
		return 1;
	}
	INIT_LIST_HEAD(&mp->list);
	sparam = mp->virt;

	/* Copy the service parameters into the DMA buffer. */
	memcpy(sparam, param, sizeof (struct serv_parm));

	/* Save buffer pointer for the completion handler. */
	pmb->context1 = (uint8_t *) mp;

	mb->mbxCommand = MBX_REG_LOGIN64;
	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
	mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);

	return 0;
}
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797void
798lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
799 LPFC_MBOXQ_t * pmb)
800{
801 MAILBOX_t *mb;
802
803 mb = &pmb->u.mb;
804 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
805
806 mb->un.varUnregLogin.rpi = rpi;
807 mb->un.varUnregLogin.rsvd1 = 0;
808 if (phba->sli_rev >= LPFC_SLI_REV3)
809 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
810
811 mb->mbxCommand = MBX_UNREG_LOGIN;
812 mb->mbxOwner = OWN_HOST;
813
814 return;
815}
816
817
818
819
820
821
822
823
/**
 * lpfc_sli4_unreg_all_rpis - Unregister all RPIs of a vport (SLI-4)
 * @vport: pointer to the virtual N_Port.
 *
 * Issues a non-blocking UNREG_LOGIN against the vport's own (absolute)
 * vpi with rsvd1 set to 0x4000.  NOTE(review): the 0x4000 value appears
 * to select "unregister all RPIs on this vpi" semantics - confirm the
 * encoding against the SLI-4 mailbox specification.  The mailbox element
 * is freed here only if the issue fails; otherwise the default
 * completion handler owns it.
 */
void
lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		/*
		 * Build the UNREG_LOGIN with the vport's absolute vpi
		 * (from the vpi_ids mapping), then mark it as an
		 * "unreg all" request via rsvd1.
		 */
		lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
				 mbox);
		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
}
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867void
868lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
869{
870 MAILBOX_t *mb = &pmb->u.mb;
871 struct lpfc_hba *phba = vport->phba;
872
873 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
874
875
876
877 if ((phba->sli_rev == LPFC_SLI_REV4) &&
878 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
879 mb->un.varRegVpi.upd = 1;
880
881 mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
882 mb->un.varRegVpi.sid = vport->fc_myDID;
883 if (phba->sli_rev == LPFC_SLI_REV4)
884 mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
885 else
886 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
887 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
888 sizeof(struct lpfc_name));
889 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
890 mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
891
892 mb->mbxCommand = MBX_REG_VPI;
893 mb->mbxOwner = OWN_HOST;
894 return;
895
896}
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914void
915lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
916{
917 MAILBOX_t *mb = &pmb->u.mb;
918 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
919
920 if (phba->sli_rev == LPFC_SLI_REV3)
921 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
922 else if (phba->sli_rev >= LPFC_SLI_REV4)
923 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
924
925 mb->mbxCommand = MBX_UNREG_VPI;
926 mb->mbxOwner = OWN_HOST;
927 return;
928
929}
930
931
932
933
934
935
936
937
/**
 * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
 * @phba: pointer to lpfc hba data structure.
 *
 * Fills in the ring descriptor array of the PCB: for each SLI ring, the
 * number of command/response IOCB entries and the DMA addresses of the
 * command and response rings within the slim2p region.  IOCB entry sizes
 * depend on the negotiated SLI revision (SLI-3 uses larger IOCBs).
 * Rings with no configured IOCBs get zeroed descriptors.
 */
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	PCB_t *pcbp = phba->pcb;
	dma_addr_t pdma_addr;
	uint32_t offset;
	uint32_t iocbCnt = 0;	/* running index into phba->IOCBs[] */
	int i;

	pcbp->maxRing = (psli->num_rings - 1);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* IOCB sizes depend on the SLI revision in use. */
		pring->sli.sli3.sizeCiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
		pring->sli.sli3.sizeRiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;

		/* Unused ring: zero its descriptor and skip it. */
		if ((pring->sli.sli3.numCiocb == 0) ||
			(pring->sli.sli3.numRiocb == 0)) {
			pcbp->rdsc[i].cmdEntries = 0;
			pcbp->rdsc[i].rspEntries = 0;
			pcbp->rdsc[i].cmdAddrHigh = 0;
			pcbp->rdsc[i].rspAddrHigh = 0;
			pcbp->rdsc[i].cmdAddrLow = 0;
			pcbp->rdsc[i].rspAddrLow = 0;
			pring->sli.sli3.cmdringaddr = NULL;
			pring->sli.sli3.rspringaddr = NULL;
			continue;
		}

		/* Command ring: virtual address plus DMA address derived
		 * from the ring's offset within the slim2p region.
		 */
		pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
		pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;

		offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
			 (uint8_t *) phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numCiocb;

		/* Response ring follows immediately after the command ring. */
		pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];

		pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
			 (uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numRiocb;
	}
}
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012void
1013lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1014{
1015 MAILBOX_t *mb = &pmb->u.mb;
1016 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1017 mb->un.varRdRev.cv = 1;
1018 mb->un.varRdRev.v3req = 1;
1019 mb->mbxCommand = MBX_READ_REV;
1020 mb->mbxOwner = OWN_HOST;
1021 return;
1022}
1023
/**
 * lpfc_sli4_swap_str - Fix byte order of string data in a completed mailbox
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the completed mailbox command.
 *
 * For MBX_READ_REV, the firmware name strings in the MQE are run through
 * lpfc_sli_pcimem_bcopy() with source == destination, which presumably
 * performs a byte-swapping copy in place (NOTE(review): confirm the swap
 * semantics in lpfc_sli_pcimem_bcopy).  Other commands need no fix-up.
 */
void
lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_mqe *mqe;

	switch (mb->mbxCommand) {
	case MBX_READ_REV:
		mqe = &pmb->u.mqe;
		/* In-place: source and destination are the same buffer. */
		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
				      mqe->un.read_rev.fw_name, 16);
		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
				      mqe->un.read_rev.ulp_fw_name, 16);
		break;
	default:
		break;
	}
	return;
}
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054static void
1055lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
1056 struct lpfc_hbq_init *hbq_desc)
1057{
1058 hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
1059 hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
1060 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
1061}
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073static void
1074lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
1075 struct lpfc_hbq_init *hbq_desc)
1076{
1077 hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
1078 hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
1079 hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
1080 hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
1081 memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
1082 sizeof(hbqmb->profiles.profile3.cmdmatch));
1083}
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096static void
1097lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
1098 struct lpfc_hbq_init *hbq_desc)
1099{
1100 hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
1101 hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
1102 hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
1103 hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
1104 memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
1105 sizeof(hbqmb->profiles.profile5.cmdmatch));
1106}
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
/**
 * lpfc_config_hbq - Prepare a CONFIG_HBQ mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @id: HBQ identifier being configured.
 * @hbq_desc: driver HBQ descriptor with entry counts, profile and masks.
 * @hbq_entry_index: index of this HBQ's first entry within hbqslimp.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Fills @pmb as an MBX_CONFIG_HBQ request: basic HBQ geometry, the DMA
 * address of the HBQ entries within the hbqslimp region, the selected
 * buffer profile (2, 3 or 5) and any rctl/type ring masks.
 */
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
		 struct lpfc_hbq_init *hbq_desc,
		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	hbqmb->hbqId = id;
	hbqmb->entry_count = hbq_desc->entry_count;	/* # entries in HBQ */
	hbqmb->recvNotify = hbq_desc->rn;	/* Receive Notification */

	hbqmb->numMask = hbq_desc->mask_count;	/* # R_CTL/TYPE masks */

	hbqmb->profile = hbq_desc->profile;	/* selected buffer profile */

	/* Rings this HBQ applies to. */
	hbqmb->ringMask = hbq_desc->ring_mask;

	/* Header-splitting length. */
	hbqmb->headerLen = hbq_desc->headerLen;

	hbqmb->logEntry = hbq_desc->logEntry;

	/* DMA address of this HBQ's entries: base of the hbqslimp region
	 * plus the entry offset.
	 */
	hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
		hbq_entry_index * sizeof(struct lpfc_hbq_entry);
	hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);

	mb->mbxCommand = MBX_CONFIG_HBQ;
	mb->mbxOwner = OWN_HOST;

	/* Copy the profile-specific parameters; only profiles 2, 3 and 5
	 * carry extra fields.
	 */
	if (hbq_desc->profile == 2)
		lpfc_build_hbq_profile2(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 3)
		lpfc_build_hbq_profile3(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 5)
		lpfc_build_hbq_profile5(hbqmb, hbq_desc);

	/* Done if no R_CTL/TYPE masks are configured. */
	if (!hbq_desc->mask_count)
		return;

	/* Otherwise copy each match/mask pair into the mailbox. */
	for (i = 0; i < hbq_desc->mask_count; i++) {
		hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
		hbqmb->hbqMasks[i].tmask  = hbq_desc->hbqMasks[i].tmask;
		hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
		hbqmb->hbqMasks[i].rctlmask  = hbq_desc->hbqMasks[i].rctlmask;
	}

	return;
}
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199void
1200lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1201{
1202 int i;
1203 MAILBOX_t *mb = &pmb->u.mb;
1204 struct lpfc_sli *psli;
1205 struct lpfc_sli_ring *pring;
1206
1207 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1208
1209 mb->un.varCfgRing.ring = ring;
1210 mb->un.varCfgRing.maxOrigXchg = 0;
1211 mb->un.varCfgRing.maxRespXchg = 0;
1212 mb->un.varCfgRing.recvNotify = 1;
1213
1214 psli = &phba->sli;
1215 pring = &psli->ring[ring];
1216 mb->un.varCfgRing.numMask = pring->num_mask;
1217 mb->mbxCommand = MBX_CONFIG_RING;
1218 mb->mbxOwner = OWN_HOST;
1219
1220
1221 if (pring->prt[0].profile) {
1222 mb->un.varCfgRing.profile = pring->prt[0].profile;
1223 return;
1224 }
1225
1226
1227 for (i = 0; i < pring->num_mask; i++) {
1228 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
1229 if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
1230 mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
1231 else
1232 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
1233 mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
1234 mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
1235 }
1236
1237 return;
1238}
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
/**
 * lpfc_config_port - Prepare a CONFIG_PORT mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Builds the MBX_CONFIG_PORT command and, as a side effect, initializes
 * the Port Control Block (PCB) in the slim2p region: mailbox address,
 * host group pointer (HGP) location (host memory or SLIM, depending on
 * configuration and SLI revision), port group pointer (PGP) address and
 * the IOCB ring descriptors.  May downgrade phba->sli_rev to SLI-2 when
 * the HBA lacks the SLI-3 feature bits.  The PCB is byte-swapped in
 * place at the end for the hardware.
 */
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
	MAILBOX_t *mb = &pmb->u.mb;
	dma_addr_t pdma_addr;
	uint32_t bar_low, bar_high;
	size_t offset;
	struct lpfc_hgp hgp;
	int i;
	uint32_t pgp_offset;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;

	mb->un.varCfgPort.pcbLen = sizeof(PCB_t);

	/* DMA address of the PCB, derived from its offset in slim2p. */
	offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
	mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);

	/* Host group pointers kept in SLIM by default. */
	mb->un.varCfgPort.hps = 1;

	/* Request SLI-3 features when the HBA advertises them. */
	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
		if (phba->cfg_enable_bg)
			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
		if (phba->cfg_enable_dss)
			mb->un.varCfgPort.cdss = 1; /* configure Data Security */
		mb->un.varCfgPort.cerbm = 1; /* configure embedded receive
					      * buffer management (HBQs) */
		mb->un.varCfgPort.ccrp = 1; /* command ring polling */
		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
		if (phba->max_vpi && phba->cfg_enable_npiv &&
		    phba->vpd.sli3Feat.cmv) {
			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
			mb->un.varCfgPort.cmv = 1; /* multiple vports */
		} else
			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
	} else
		phba->sli_rev = LPFC_SLI_REV2; /* fall back to SLI-2 */
	mb->un.varCfgPort.sli_mode = phba->sli_rev;

	/* SLI-3 ports get async status notification. */
	if (phba->sli_rev == LPFC_SLI_REV3)
		mb->un.varCfgPort.casabt = 1;

	/* Now set up the PCB itself. */
	phba->pcb->type = TYPE_NATIVE_SLI2;
	phba->pcb->feature = FEATURE_INITIAL_SLI2;

	/* Mailbox pointer (within slim2p) and size. */
	phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
	offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);

	/* Read the memory BARs; needed to compute the SLIM-resident
	 * host group pointer addresses below.
	 */
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);

	/* Host group pointers either live in host memory (slim2p) or in
	 * the adapter's SLIM.  Host-memory HGP is only used when
	 * configured and not running SLI-3 (SLI-3 needs the hbq_put
	 * pointers that only the SLIM layout provides).
	 */
	if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
		phba->host_gp = &phba->mbox->us.s2.host[0];
		phba->hbq_put = NULL;
		offset = (uint8_t *)&phba->mbox->us.s2.host -
			(uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
		phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
	} else {
		/* Host group pointers in SLIM. */
		mb->un.varCfgPort.hps = 1;

		if (phba->sli_rev == 3) {
			phba->host_gp = &mb_slim->us.s3.host[0];
			phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
		} else {
			phba->host_gp = &mb_slim->us.s2.host[0];
			phba->hbq_put = NULL;
		}

		/* HGP bus address: BAR base (flag bits masked off) plus
		 * the pointer's offset within SLIM.
		 */
		phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
			(void __iomem *)phba->host_gp -
			(void __iomem *)phba->MBslimaddr;
		if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
			phba->pcb->hgpAddrHigh = bar_high;
		else
			phba->pcb->hgpAddrHigh = 0;
		/* Zero each ring's HGP slot in SLIM. */
		memset(&hgp, 0, sizeof(struct lpfc_hgp));

		for (i = 0; i < phba->sli.num_rings; i++) {
			lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
				    sizeof(*phba->host_gp));
		}
	}

	/* Port group pointer: layout differs between SLI-2 and SLI-3. */
	if (phba->sli_rev == 3)
		pgp_offset = offsetof(struct lpfc_sli2_slim,
				      mbx.us.s3_pgp.port);
	else
		pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
	pdma_addr = phba->slim2p.phys + pgp_offset;
	phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);

	/* Fill in the PCB's IOCB ring descriptors. */
	lpfc_config_pcb_setup(phba);

	/* Special handling for LightPulse LC HBAs. */
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		uint32_t hbainit[5];

		lpfc_hba_init(phba, hbainit);

		memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
	}

	/* Byte-swap the PCB in place for the hardware. */
	lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443void
1444lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1445{
1446 MAILBOX_t *mb = &pmb->u.mb;
1447
1448 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1449 mb->mbxCommand = MBX_KILL_BOARD;
1450 mb->mbxOwner = OWN_HOST;
1451 return;
1452}
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464void
1465lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
1466{
1467 struct lpfc_sli *psli;
1468
1469 psli = &phba->sli;
1470
1471 list_add_tail(&mbq->list, &psli->mboxq);
1472
1473 psli->mboxq_cnt++;
1474
1475 return;
1476}
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492LPFC_MBOXQ_t *
1493lpfc_mbox_get(struct lpfc_hba * phba)
1494{
1495 LPFC_MBOXQ_t *mbq = NULL;
1496 struct lpfc_sli *psli = &phba->sli;
1497
1498 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
1499 if (mbq)
1500 psli->mboxq_cnt--;
1501
1502 return mbq;
1503}
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
/**
 * __lpfc_mbox_cmpl_put - Add a mailbox command to the completion list
 * @phba: pointer to lpfc hba data structure.
 * @mbq: completed mailbox command.
 *
 * Lock-free variant: appends @mbq to the tail of the completion list.
 * NOTE(review): lpfc_mbox_cmpl_put() takes hbalock around this call;
 * any other caller presumably must do the same - confirm.
 */
void
__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
}
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531void
1532lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1533{
1534 unsigned long iflag;
1535
1536
1537 spin_lock_irqsave(&phba->hbalock, iflag);
1538 __lpfc_mbox_cmpl_put(phba, mbq);
1539 spin_unlock_irqrestore(&phba->hbalock, iflag);
1540 return;
1541}
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554int
1555lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1556{
1557
1558
1559
1560 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1561 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1562 if (!mboxq->vport) {
1563 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1564 "1814 Mbox x%x failed, no vport\n",
1565 mboxq->u.mb.mbxCommand);
1566 dump_stack();
1567 return -ENODEV;
1568 }
1569 }
1570 return 0;
1571}
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583int
1584lpfc_mbox_dev_check(struct lpfc_hba *phba)
1585{
1586
1587 if (unlikely(pci_channel_offline(phba->pcidev)))
1588 return -ENODEV;
1589
1590
1591 if (phba->link_state == LPFC_HBA_ERROR)
1592 return -ENODEV;
1593
1594 return 0;
1595}
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
/**
 * lpfc_mbox_tmo_val - Retrieve the timeout for a mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox command whose timeout is wanted.
 *
 * Flash-access commands get the long flash timeout; SLI4_CONFIG commands
 * get the normal or extended SLI-4 config timeout depending on their
 * subsystem and opcode; everything else gets the default mailbox timeout.
 *
 * Return: one of the LPFC_MBOX_*TMO* timeout constants.
 */
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	MAILBOX_t *mbox = &mboxq->u.mb;
	uint8_t subsys, opcode;

	switch (mbox->mbxCommand) {
	/* Commands that touch flash/NVRAM need the long timeout. */
	case MBX_WRITE_NV:
	case MBX_DUMP_MEMORY:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_AREA:
	case MBX_WRITE_WWN:
	case MBX_LOAD_EXP_ROM:
	case MBX_ACCESS_VDATA:
		return LPFC_MBOX_TMO_FLASH_CMD;
	case MBX_SLI4_CONFIG:
		subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
		opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
		/* Long-running COMMON subsystem operations. */
		if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
			switch (opcode) {
			case LPFC_MBOX_OPCODE_READ_OBJECT:
			case LPFC_MBOX_OPCODE_WRITE_OBJECT:
			case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
			case LPFC_MBOX_OPCODE_DELETE_OBJECT:
			case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
			case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
			case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
			case LPFC_MBOX_OPCODE_RESET_LICENSES:
			case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
			case LPFC_MBOX_OPCODE_GET_VPD_DATA:
			case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		/* Long-running FCoE subsystem operations. */
		if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
			switch (opcode) {
			case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		return LPFC_MBOX_SLI4_CONFIG_TMO;
	}
	return LPFC_MBOX_TMO;
}
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670void
1671lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1672 dma_addr_t phyaddr, uint32_t length)
1673{
1674 struct lpfc_mbx_nembed_cmd *nembed_sge;
1675
1676 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1677 &mbox->u.mqe.un.nembed_cmd;
1678 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1679 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1680 nembed_sge->sge[sgentry].length = length;
1681}
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691void
1692lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1693 struct lpfc_mbx_sge *sge)
1694{
1695 struct lpfc_mbx_nembed_cmd *nembed_sge;
1696
1697 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1698 &mbox->u.mqe.un.nembed_cmd;
1699 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1700 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1701 sge->length = nembed_sge->sge[sgentry].length;
1702}
1703
1704
1705
1706
1707
1708
1709
1710
1711void
1712lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1713{
1714 struct lpfc_mbx_sli4_config *sli4_cfg;
1715 struct lpfc_mbx_sge sge;
1716 dma_addr_t phyaddr;
1717 uint32_t sgecount, sgentry;
1718
1719 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1720
1721
1722 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1723 mempool_free(mbox, phba->mbox_mem_pool);
1724 return;
1725 }
1726
1727
1728 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1729
1730 if (unlikely(!mbox->sge_array)) {
1731 mempool_free(mbox, phba->mbox_mem_pool);
1732 return;
1733 }
1734
1735 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1736 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1737 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1738 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
1739 mbox->sge_array->addr[sgentry], phyaddr);
1740 }
1741
1742 kfree(mbox->sge_array);
1743
1744 mempool_free(mbox, phba->mbox_mem_pool);
1745}
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
/**
 * lpfc_sli4_config - Build an SLI4_CONFIG mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: mailbox to set up (zeroed here).
 * @subsystem: SLI4 config subsystem of the request.
 * @opcode: SLI4 config opcode within @subsystem.
 * @length: total payload length in bytes, including the config header.
 * @emb: embed the payload in the mailbox (true) or carry it in external
 *       DMA pages described by SGEs (false).
 *
 * For the embedded form, fills the embedded config header and returns
 * @length. For the non-embedded form, allocates up to
 * LPFC_SLI4_MBX_SGE_MAX_PAGES DMA pages, records each as an SGE, and
 * returns the number of payload bytes actually covered — 0 if the SGE
 * tracking array cannot be allocated, and possibly less than @length if
 * page allocation stops early.
 */
int
lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
{
	struct lpfc_mbx_sli4_config *sli4_config;
	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
	uint32_t alloc_len;
	uint32_t resid_len;
	uint32_t pagen, pcount;
	void *viraddr;
	dma_addr_t phyaddr;

	/* Start from a clean mailbox and mark it as SLI4_CONFIG. */
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);

	sli4_config = &mbox->u.mqe.un.sli4_config;

	/* Embedded payload: request lives entirely inside the mailbox. */
	if (emb) {
		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
		sli4_config->header.cfg_mhdr.payload_length = length;

		bf_set(lpfc_mbox_hdr_opcode,
		       &sli4_config->header.cfg_shdr.request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem,
		       &sli4_config->header.cfg_shdr.request, subsystem);
		/* Request length excludes the command header itself. */
		sli4_config->header.cfg_shdr.request.request_length =
			length - LPFC_MBX_CMD_HDR_LENGTH;
		return length;
	}

	/* Non-embedded: compute the page count, capped at the SGE max. */
	pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
	/* Tracking array for the per-page virtual addresses, used later
	 * by lpfc_sli4_mbox_cmd_free() to release the pages.
	 */
	mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
				  GFP_KERNEL);
	if (!mbox->sge_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2527 Failed to allocate non-embedded SGE "
				"array.\n");
		return 0;
	}
	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
		/* Allocate one zeroed DMA page per SGE; stop on the first
		 * failure and describe only the pages actually obtained.
		 */
		viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
					      SLI4_PAGE_SIZE, &phyaddr,
					      GFP_KERNEL);
		if (!viraddr)
			break;
		mbox->sge_array->addr[pagen] = viraddr;
		/* The config header occupies the start of the first page. */
		if (pagen == 0)
			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
		resid_len = length - alloc_len;
		if (resid_len > SLI4_PAGE_SIZE) {
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      SLI4_PAGE_SIZE);
			alloc_len += SLI4_PAGE_SIZE;
		} else {
			/* Last page: only the residual length is used. */
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      resid_len);
			alloc_len = length;
		}
	}

	/* Record how much payload the SGE chain actually covers. */
	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);

	/* Fill in the request header in the first page, if one exists. */
	if (pagen > 0) {
		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
		cfg_shdr->request.request_length =
			alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
	}
	/* NOTE(review): lpfc_sli_pcimem_bcopy() with src == dst appears to
	 * be an in-place endianness conversion of the header — confirm.
	 */
	if (cfg_shdr)
		lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
				      sizeof(union lpfc_sli4_cfg_shdr));
	return alloc_len;
}
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
/**
 * lpfc_sli4_mbox_rsrc_extent - Fill a resource-extent mailbox request
 * @phba: pointer to lpfc hba data structure.
 * @mbox: already-built SLI4_CONFIG mailbox (embedded or non-embedded).
 * @exts_count: extent count, written only for the ALLOC opcode.
 * @rsrc_type: resource type to place in the request.
 * @emb: LPFC_SLI4_MBX_EMBED or LPFC_SLI4_MBX_NEMBED, matching how the
 *       mailbox was built.
 *
 * Writes the resource type (and, for the allocate opcode, the extent
 * count) into the embedded payload or into the first external page of a
 * non-embedded command. Returns 0 on success, 1 if the non-embedded
 * page is missing or the opcode is not a resource-extent opcode.
 */
int
lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
			   uint16_t exts_count, uint16_t rsrc_type, bool emb)
{
	uint8_t opcode = 0;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
	void *virtaddr = NULL;

	/* Non-embedded: the request body lives in the first DMA page. */
	if (emb == LPFC_SLI4_MBX_NEMBED) {
		virtaddr = mbox->sge_array->addr[0];
		if (virtaddr == NULL)
			return 1;
		n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
	}

	/* Set the resource type in whichever payload form is in use. */
	if (emb == LPFC_SLI4_MBX_EMBED)
		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
		       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
		       rsrc_type);
	else {
		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
		       n_rsrc_extnt, rsrc_type);
		/* NOTE(review): in-place lpfc_sli_pcimem_bcopy() of word4
		 * looks like an endianness fix-up of the external page's
		 * type word — confirm.
		 */
		lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
				      &n_rsrc_extnt->word4,
				      sizeof(uint32_t));
	}

	/* Only the allocate opcode carries an extent count. */
	opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
	switch (opcode) {
	case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
		if (emb == LPFC_SLI4_MBX_EMBED)
			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
			       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
			       exts_count);
		else
			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
			       n_rsrc_extnt, exts_count);
		break;
	case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
	case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
	case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
		/* These opcodes need only the resource type set above. */
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2929 Resource Extent Opcode x%x is "
				"unsupported\n", opcode);
		return 1;
	}

	return 0;
}
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940uint8_t
1941lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1942{
1943 struct lpfc_mbx_sli4_config *sli4_cfg;
1944 union lpfc_sli4_cfg_shdr *cfg_shdr;
1945
1946 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1947 return LPFC_MBOX_SUBSYSTEM_NA;
1948 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1949
1950
1951 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1952 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1953 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1954 }
1955
1956
1957 if (unlikely(!mbox->sge_array))
1958 return LPFC_MBOX_SUBSYSTEM_NA;
1959 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1960 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1961}
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973uint8_t
1974lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1975{
1976 struct lpfc_mbx_sli4_config *sli4_cfg;
1977 union lpfc_sli4_cfg_shdr *cfg_shdr;
1978
1979 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1980 return LPFC_MBOX_OPCODE_NA;
1981 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1982
1983
1984 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1985 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1986 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1987 }
1988
1989
1990 if (unlikely(!mbox->sge_array))
1991 return LPFC_MBOX_OPCODE_NA;
1992 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1993 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1994}
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
/**
 * lpfc_sli4_mbx_read_fcf_rec - Build a READ_FCF_TABLE mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox to construct (non-embedded SLI4_CONFIG).
 * @fcf_index: index of the FCF record to read from the port's FCF table.
 *
 * Constructs a non-embedded FCoE READ_FCF_TABLE request sized to hold one
 * fcf_record plus the config header and two words. Returns 0 on success,
 * -ENOMEM when @mboxq is NULL or the DMA allocation came up short.
 */
int
lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
			   struct lpfcMboxq *mboxq,
			   uint16_t fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	uint8_t *bytep;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;

	if (!mboxq)
		return -ENOMEM;

	/* One FCF record plus the config header and two request words. */
	req_len = sizeof(struct fcf_record) +
		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);

	/* Set up the non-embedded SLI4_CONFIG with external DMA pages. */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
			LPFC_SLI4_MBX_NEMBED);

	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"0291 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}

	/* The request body lives at the start of the first external page;
	 * the SGE is read only to recover its physical address.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	virt_addr = mboxq->sge_array->addr[0];
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;

	/* Record index the port should return. */
	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
	/* NOTE(review): in-place lpfc_sli_pcimem_bcopy() of the first word
	 * past the config header looks like an endianness fix-up — confirm.
	 */
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));

	return 0;
}
2054
2055
2056
2057
2058
2059
2060
2061
2062void
2063lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
2064{
2065
2066 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
2067 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
2068
2069
2070 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
2071 bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
2072
2073
2074 if (phba->cfg_enable_bg)
2075 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
2076
2077
2078 if (phba->max_vpi && phba->cfg_enable_npiv)
2079 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
2080
2081 return;
2082}
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095void
2096lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2097{
2098 struct lpfc_mbx_init_vfi *init_vfi;
2099
2100 memset(mbox, 0, sizeof(*mbox));
2101 mbox->vport = vport;
2102 init_vfi = &mbox->u.mqe.un.init_vfi;
2103 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
2104 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
2105 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
2106 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
2107 bf_set(lpfc_init_vfi_vfi, init_vfi,
2108 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2109 bf_set(lpfc_init_vfi_vpi, init_vfi,
2110 vport->phba->vpi_ids[vport->vpi]);
2111 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2112 vport->phba->fcf.fcfi);
2113}
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
/**
 * lpfc_reg_vfi - Build the REG_VFI mailbox command
 * @mbox: mailbox to construct (zeroed here).
 * @vport: vport to register the VFI for.
 * @phys: DMA address of the service-parameter buffer carried in the BDE.
 *
 * Registers the vport's VFI against the current FCFI and VPI, attaching
 * the port name and the service-parameter buffer. On an FC link with an
 * already-registered VFI and no topology change, the command is turned
 * into an update (upd set, vp cleared) instead of a fresh registration.
 */
void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
	struct lpfc_mbx_reg_vfi *reg_vfi;
	struct lpfc_hba *phba = vport->phba;

	memset(mbox, 0, sizeof(*mbox));
	reg_vfi = &mbox->u.mqe.un.reg_vfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
	bf_set(lpfc_reg_vfi_vfi, reg_vfi,
	       phba->sli4_hba.vfi_ids[vport->vfi]);
	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
	bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
	/* Port name goes out little-endian word by word. */
	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
	reg_vfi->e_d_tov = phba->fc_edtov;
	reg_vfi->r_a_tov = phba->fc_ratov;
	/* BDE pointing at the vport's service parameters. */
	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
	reg_vfi->bde.addrLow = putPaddrLow(phys);
	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);

	/* FC link + VFI already registered + unchanged topology: issue an
	 * update of the existing VFI rather than a new registration.
	 */
	if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
	    (vport->fc_flag & FC_VFI_REGISTERED) &&
	    (!phba->fc_topology_changed)) {
		bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
		bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
			"3134 Register VFI, mydid:x%x, fcfi:%d, "
			" vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
			" port_state:x%x topology chg:%d\n",
			vport->fc_myDID,
			phba->fcf.fcfi,
			phba->sli4_hba.vfi_ids[vport->vfi],
			phba->vpi_ids[vport->vpi],
			reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
			vport->port_state, phba->fc_topology_changed);
}
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182void
2183lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
2184{
2185 memset(mbox, 0, sizeof(*mbox));
2186 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
2187 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2188 phba->vpi_ids[vpi]);
2189 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2190 phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2191}
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204void
2205lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2206{
2207 memset(mbox, 0, sizeof(*mbox));
2208 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2209 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2210 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2211}
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221int
2222lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2223{
2224 struct lpfc_dmabuf *mp = NULL;
2225 MAILBOX_t *mb;
2226
2227 memset(mbox, 0, sizeof(*mbox));
2228 mb = &mbox->u.mb;
2229
2230 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2231 if (mp)
2232 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2233
2234 if (!mp || !mp->virt) {
2235 kfree(mp);
2236
2237 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2238 "2569 lpfc dump config region 23: memory"
2239 " allocation failed\n");
2240 return 1;
2241 }
2242
2243 memset(mp->virt, 0, LPFC_BPL_SIZE);
2244 INIT_LIST_HEAD(&mp->list);
2245
2246
2247 mbox->context1 = (uint8_t *) mp;
2248
2249 mb->mbxCommand = MBX_DUMP_MEMORY;
2250 mb->un.varDmp.type = DMP_NV_PARAMS;
2251 mb->un.varDmp.region_id = DMP_REGION_23;
2252 mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
2253 mb->un.varWords[3] = putPaddrLow(mp->phys);
2254 mb->un.varWords[4] = putPaddrHigh(mp->phys);
2255 return 0;
2256}
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271void
2272lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2273{
2274 struct lpfc_mbx_reg_fcfi *reg_fcfi;
2275
2276 memset(mbox, 0, sizeof(*mbox));
2277 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
2278 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
2279 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
2280 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
2281 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
2282 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
2283 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
2284 phba->fcf.current_rec.fcf_indx);
2285
2286 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
2287 if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
2288 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
2289 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2290 phba->fcf.current_rec.vlan_id);
2291 }
2292}
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302void
2303lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2304{
2305 memset(mbox, 0, sizeof(*mbox));
2306 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
2307 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
2308}
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318void
2319lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2320{
2321 struct lpfc_hba *phba = ndlp->phba;
2322 struct lpfc_mbx_resume_rpi *resume_rpi;
2323
2324 memset(mbox, 0, sizeof(*mbox));
2325 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2326 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2327 bf_set(lpfc_resume_rpi_index, resume_rpi,
2328 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2329 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2330 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2331}
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341void
2342lpfc_supported_pages(struct lpfcMboxq *mbox)
2343{
2344 struct lpfc_mbx_supp_pages *supp_pages;
2345
2346 memset(mbox, 0, sizeof(*mbox));
2347 supp_pages = &mbox->u.mqe.un.supp_pages;
2348 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2349 bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
2350}
2351
2352
2353
2354
2355
2356
2357
2358
2359void
2360lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
2361{
2362 struct lpfc_mbx_pc_sli4_params *sli4_params;
2363
2364 memset(mbox, 0, sizeof(*mbox));
2365 sli4_params = &mbox->u.mqe.un.sli4_params;
2366 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2367 bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
2368}
2369