/*
 * Mailbox command construction and management routines for the Emulex
 * LightPulse Fibre Channel (lpfc) host bus adapter driver.
 */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_compat.h"

/* Build a DUMP_MEMORY mailbox command that reads the static vport
 * information region.  Returns 0 on success, 1 if a DMA buffer could not
 * be allocated.
 */
int
lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
		uint16_t offset)
{
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;

	mb = &pmb->u.mb;

	/* Set up to dump the static vport information region */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = DMP_REGION_VPORT;
	mb->mbxOwner = OWN_HOST;

	/* For SLI3 HBAs the data is embedded in the mailbox */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		mb->un.varDmp.cv = 1;
		mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
		return 0;
	}

	/* For SLI4 HBAs the driver needs to allocate response memory */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	if (!mp || !mp->virt) {
		kfree(mp);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
			"2605 lpfc_dump_static_vport: memory"
			" allocation failed\n");
		return 1;
	}
	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);
	/* save address for completion */
	pmb->context2 = (uint8_t *) mp;
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);

	return 0;
}
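
/* Note: on SLI4 the DMA buffer hung on pmb->context2 above is only
 * allocated here; it is expected to be unmapped and freed by the mailbox
 * completion handler (or by the caller if the command is never issued).
 */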

void
lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;
	mb->mbxCommand = MBX_DOWN_LINK;
	mb->mbxOwner = OWN_HOST;
}

/* Prepare a DUMP_MEMORY mailbox command for the given region; the caller's
 * response buffer in pmb->context2 is preserved across the reset of pmb.
 */
void
lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
		uint16_t region_id)
{
	MAILBOX_t *mb;
	void *ctx;

	mb = &pmb->u.mb;
	ctx = pmb->context2;

	/* Set up the DUMP_MEMORY command */
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.cv = 1;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = region_id;
	mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
	mb->un.varDmp.co = 0;
	mb->un.varDmp.resp_offset = 0;
	pmb->context2 = ctx;
	mb->mbxOwner = OWN_HOST;
	return;
}

/* Prepare a DUMP_MEMORY mailbox command that reads the HBA's wake-up
 * parameter region; pmb->context2 is preserved as above.
 */
void
lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	void *ctx;

	mb = &pmb->u.mb;
	/* Save context so that it can be restored after the memset */
	ctx = pmb->context2;

	/* Set up the DUMP_MEMORY command */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->mbxOwner = OWN_HOST;
	mb->un.varDmp.cv = 1;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = 0;
	mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
	mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
	mb->un.varDmp.co = 0;
	mb->un.varDmp.resp_offset = 0;
	pmb->context2 = ctx;
	return;
}

void
lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_READ_NV;
	mb->mbxOwner = OWN_HOST;
	return;
}

void
lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
		uint32_t ring)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
	mb->un.varCfgAsyncEvent.ring = ring;
	mb->mbxOwner = OWN_HOST;
	return;
}

void
lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_HEARTBEAT;
	mb->mbxOwner = OWN_HOST;
	return;
}
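
/* Typical caller pattern for the simple setup routines above (a sketch
 * only, not a definitive sequence; see the actual callers elsewhere in
 * the driver):
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return;
 *	lpfc_heart_beat(phba, pmb);
 *	pmb->mbox_cmpl = lpfc_hb_mbox_cmpl;	completion handler
 *	pmb->vport = phba->pport;
 *	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */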

/* Build a READ_TOPOLOGY mailbox command; @mp supplies the DMA buffer that
 * will receive the loop (ALPA) map and is saved in pmb->context1 for the
 * completion handler.
 */
int
lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
		struct lpfc_dmabuf *mp)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;

	psli = &phba->sli;
	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	INIT_LIST_HEAD(&mp->list);
	mb->mbxCommand = MBX_READ_TOPOLOGY;
	mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
	mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);

	/* Save address for later completion and set the owner to host so
	 * that the firmware knows this mailbox is available for processing.
	 */
	pmb->context1 = (uint8_t *)mp;
	mb->mbxOwner = OWN_HOST;
	return (0);
}

void
lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varClearLA.eventTag = phba->fc_eventTag;
	mb->mbxCommand = MBX_CLEAR_LA;
	mb->mbxOwner = OWN_HOST;
	return;
}

void
lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_vport *vport = phba->pport;
	MAILBOX_t *mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	if (phba->cfg_cr_delay) {
		mb->un.varCfgLnk.cr = 1;
		mb->un.varCfgLnk.ci = 1;
		mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
		mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
	}

	mb->un.varCfgLnk.myId = vport->fc_myDID;
	mb->un.varCfgLnk.edtov = phba->fc_edtov;
	mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
	mb->un.varCfgLnk.ratov = phba->fc_ratov;
	mb->un.varCfgLnk.rttov = phba->fc_rttov;
	mb->un.varCfgLnk.altov = phba->fc_altov;
	mb->un.varCfgLnk.crtov = phba->fc_crtov;
	mb->un.varCfgLnk.citov = phba->fc_citov;

	if (phba->cfg_ack0)
		mb->un.varCfgLnk.ack0_enable = 1;

	mb->mbxCommand = MBX_CONFIG_LINK;
	mb->mbxOwner = OWN_HOST;
	return;
}

/* Build a CONFIG_MSI mailbox command for SLI-3 multi-message (MSI-X)
 * operation: ring attention conditions are routed to MSI-X message 1,
 * everything else defaults to message 0.  Returns 0 on success or -EINVAL
 * if the configuration does not allow it.
 */
int
lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t attentionConditions[2];

	/* Sanity check */
	if (phba->cfg_use_msi != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0475 Not configured for supporting MSI-X "
				"cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
		return -EINVAL;
	}

	if (phba->sli_rev < 3) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0476 HBA not supporting SLI-3 or later "
				"SLI Revision: 0x%x\n", phba->sli_rev);
		return -EINVAL;
	}

	/* Clear mailbox command fields */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	/* Multi-message attention configuration */
	attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
				  HA_LATT | HA_MBATT);
	attentionConditions[1] = 0;

	mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
	mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];

	/* Set up message number to HA bit association */
#ifdef __BIG_ENDIAN_BITFIELD
	/* RA0 (FCP Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
	/* RA1 (Other Protocol Extra Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
#else	/* __LITTLE_ENDIAN_BITFIELD */
	/* RA0 (FCP Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
	/* RA1 (Other Protocol Extra Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
#endif
	/* Multi-message interrupt autoclear configuration */
	mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
	mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];

	/* For now, HBA autoclear does not work reliably, disable it */
	mb->un.varCfgMSI.autoClearHA[0] = 0;
	mb->un.varCfgMSI.autoClearHA[1] = 0;

	/* Set command and owner bit */
	mb->mbxCommand = MBX_CONFIG_MSI;
	mb->mbxOwner = OWN_HOST;

	return 0;
}

/* Build an INIT_LINK mailbox command using the requested topology and link
 * speed; unsupported or unknown speeds fall back to auto-negotiation.
 */
void
lpfc_init_link(struct lpfc_hba * phba,
	       LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
{
	lpfc_vpd_t *vpd;
	struct lpfc_sli *psli;
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	psli = &phba->sli;
	switch (topology) {
	case FLAGS_TOPOLOGY_MODE_LOOP_PT:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
		break;
	case FLAGS_TOPOLOGY_MODE_PT_PT:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		break;
	case FLAGS_TOPOLOGY_MODE_LOOP:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		break;
	case FLAGS_TOPOLOGY_MODE_PT_LOOP:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
		break;
	case FLAGS_LOCAL_LB:
		mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		break;
	}

	/* Enable asynchronous ABTS responses from firmware */
	mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;

	/* Set up the requested link speed */
	vpd = &phba->vpd;
	if (vpd->rev.feaLevelHigh >= 0x02) {
		switch (linkspeed) {
		case LPFC_USER_LINK_SPEED_1G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
			break;
		case LPFC_USER_LINK_SPEED_2G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
			break;
		case LPFC_USER_LINK_SPEED_4G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
			break;
		case LPFC_USER_LINK_SPEED_8G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
			break;
		case LPFC_USER_LINK_SPEED_10G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
			break;
		case LPFC_USER_LINK_SPEED_16G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
			break;
		case LPFC_USER_LINK_SPEED_AUTO:
		default:
			mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
			break;
		}
	} else
		mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;

	mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
	mb->mbxOwner = OWN_HOST;
	mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
	return;
}

/* Build a READ_SPARM64 mailbox command.  A DMA buffer for the returned
 * service parameters is allocated here and saved in pmb->context1;
 * returns 1 if the buffer cannot be allocated.
 */
int
lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
	struct lpfc_dmabuf *mp;
	MAILBOX_t *mb;
	struct lpfc_sli *psli;

	psli = &phba->sli;
	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->mbxOwner = OWN_HOST;

	/* Get a buffer to hold the HBA's Service Parameters */
	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		kfree(mp);
		mb->mbxCommand = MBX_READ_SPARM64;
		/* READ_SPARAM: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0301 READ_SPARAM: no buffers\n");
		return (1);
	}
	INIT_LIST_HEAD(&mp->list);
	mb->mbxCommand = MBX_READ_SPARM64;
	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
	mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
	mb->un.varRdSparm.vpi = vpi + phba->vpi_base;

	/* save address for completion */
	pmb->context1 = mp;

	return (0);
}
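
/* The lpfc_dmabuf saved in pmb->context1 above carries the DMA mapping of
 * the returned service parameters; whoever completes or discards this
 * mailbox command is responsible for releasing that buffer again with
 * lpfc_mbuf_free() and kfree().
 */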

void
lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
	       LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varUnregDID.did = did;
	if (vpi != 0xffff)
		vpi += phba->vpi_base;
	mb->un.varUnregDID.vpi = vpi;

	mb->mbxCommand = MBX_UNREG_D_ID;
	mb->mbxOwner = OWN_HOST;
	return;
}

void
lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->mbxCommand = MBX_READ_CONFIG;
	mb->mbxOwner = OWN_HOST;
	return;
}

void
lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->mbxCommand = MBX_READ_LNK_STAT;
	mb->mbxOwner = OWN_HOST;
	return;
}

/* Build a REG_LOGIN64 mailbox command that registers a remote port's
 * service parameters with the firmware.  A DMA buffer holding a copy of
 * @param is allocated and saved in pmb->context1; returns 1 on allocation
 * failure or on an invalid SLI4 RPI.
 */
int
lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint8_t *sparam;
	struct lpfc_dmabuf *mp;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varRegLogin.rpi = 0;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		mb->un.varRegLogin.rpi = rpi;
		if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
			return 1;
	}
	mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
	mb->un.varRegLogin.did = did;
	mb->mbxOwner = OWN_HOST;

	mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		kfree(mp);
		mb->mbxCommand = MBX_REG_LOGIN64;
		/* REG_LOGIN: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
				"rpi x%x\n", vpi, did, rpi);
		return (1);
	}
	INIT_LIST_HEAD(&mp->list);
	sparam = mp->virt;

	/* Copy param's into a new buffer */
	memcpy(sparam, param, sizeof (struct serv_parm));

	/* save address for completion */
	pmb->context1 = (uint8_t *) mp;

	mb->mbxCommand = MBX_REG_LOGIN64;
	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
	mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);

	return (0);
}

/* Build an UNREG_LOGIN mailbox command for the given RPI/VPI pair. */
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
		 LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varUnregLogin.rpi = (uint16_t) rpi;
	mb->un.varUnregLogin.rsvd1 = 0;
	mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;

	mb->mbxCommand = MBX_UNREG_LOGIN;
	mb->mbxOwner = OWN_HOST;

	return;
}

/* Issue an UNREG_LOGIN that unregisters all RPIs associated with @vport
 * (SLI4 only); rsvd1 = 0x4000 marks the request as an unreg-all for the
 * vpi to the firmware.
 */
void
lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi,
				 vport->vpi + phba->vpi_base, mbox);
		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
}

/* Build a REG_VPI mailbox command to register @vport's virtual N_Port ID.
 * On SLI4, a VPI that is already registered is updated in place (upd bit).
 */
void
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_hba *phba = vport->phba;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	/*
	 * Set the re-reg VPI bit for firmware to update the MAC address.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
		mb->un.varRegVpi.upd = 1;
	mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
	mb->un.varRegVpi.sid = vport->fc_myDID;
	mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
	memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
	       sizeof(struct lpfc_name));
	mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
	mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);

	mb->mbxCommand = MBX_REG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;

}

/* Build an UNREG_VPI mailbox command for the given VPI. */
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	if (phba->sli_rev < LPFC_SLI_REV4)
		mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
	else
		mb->un.varUnregVpi.sli4_vpi = vpi + phba->vpi_base;

	mb->mbxCommand = MBX_UNREG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;

}

/* Fill in the Port Control Block: for each SLI ring, record the IOCB entry
 * sizes, counts and the DMA addresses of its command and response rings
 * within the SLIM2 memory area.
 */
static void
lpfc_config_pcb_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	PCB_t *pcbp = phba->pcb;
	dma_addr_t pdma_addr;
	uint32_t offset;
	uint32_t iocbCnt = 0;
	int i;

	pcbp->maxRing = (psli->num_rings - 1);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		pring->sizeCiocb = phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
		pring->sizeRiocb = phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
		/* A ring MUST have both cmd and rsp entries defined to be
		 * valid.
		 */
		if ((pring->numCiocb == 0) || (pring->numRiocb == 0)) {
			pcbp->rdsc[i].cmdEntries = 0;
			pcbp->rdsc[i].rspEntries = 0;
			pcbp->rdsc[i].cmdAddrHigh = 0;
			pcbp->rdsc[i].rspAddrHigh = 0;
			pcbp->rdsc[i].cmdAddrLow = 0;
			pcbp->rdsc[i].rspAddrLow = 0;
			pring->cmdringaddr = NULL;
			pring->rspringaddr = NULL;
			continue;
		}
		/* Command ring setup for ring */
		pring->cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
		pcbp->rdsc[i].cmdEntries = pring->numCiocb;

		offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
			 (uint8_t *) phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->numCiocb;

		/* Response ring setup for ring */
		pring->rspringaddr = (void *) &phba->IOCBs[iocbCnt];

		pcbp->rdsc[i].rspEntries = pring->numRiocb;
		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
			 (uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->numRiocb;
	}
}

/* Build a READ_REV mailbox command, requesting the extended (v3) data. */
void
lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	mb->un.varRdRev.cv = 1;
	mb->un.varRdRev.v3req = 1;
	mb->mbxCommand = MBX_READ_REV;
	mb->mbxOwner = OWN_HOST;
	return;
}

/* For SLI4 READ_REV completions, word-swap the firmware name strings so
 * that they read correctly on the host.
 */
void
lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_mqe *mqe;

	switch (mb->mbxCommand) {
	case MBX_READ_REV:
		mqe = &pmb->u.mqe;
		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
				      mqe->un.read_rev.fw_name, 16);
		lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
				      mqe->un.read_rev.ulp_fw_name, 16);
		break;
	default:
		break;
	}
	return;
}

static void
lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
			struct lpfc_hbq_init *hbq_desc)
{
	hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
	hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
	hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
}

static void
lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
			struct lpfc_hbq_init *hbq_desc)
{
	hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
	hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
	hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
	hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
	memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
	       sizeof(hbqmb->profiles.profile3.cmdmatch));
}

static void
lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
			struct lpfc_hbq_init *hbq_desc)
{
	hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
	hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
	hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
	hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
	memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
	       sizeof(hbqmb->profiles.profile5.cmdmatch));
}

/* Build a CONFIG_HBQ mailbox command for host buffer queue @id using the
 * settings in @hbq_desc.
 */
void
lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
		struct lpfc_hbq_init *hbq_desc,
		uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
	hbqmb->hbqId = id;
	hbqmb->entry_count = hbq_desc->entry_count;
	hbqmb->recvNotify = hbq_desc->rn;
	hbqmb->numMask = hbq_desc->mask_count;
	hbqmb->profile = hbq_desc->profile;
	hbqmb->ringMask = hbq_desc->ring_mask;
	hbqmb->headerLen = hbq_desc->headerLen;
	hbqmb->logEntry = hbq_desc->logEntry;

	hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
		hbq_entry_index * sizeof(struct lpfc_hbq_entry);
	hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);

	mb->mbxCommand = MBX_CONFIG_HBQ;
	mb->mbxOwner = OWN_HOST;

	/* Copy info for profiles 2, 3 and 5; for other profiles this area
	 * is reserved.
	 */
	if (hbq_desc->profile == 2)
		lpfc_build_hbq_profile2(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 3)
		lpfc_build_hbq_profile3(hbqmb, hbq_desc);
	else if (hbq_desc->profile == 5)
		lpfc_build_hbq_profile5(hbqmb, hbq_desc);

	/* Return if no rctl / type masks for this HBQ */
	if (!hbq_desc->mask_count)
		return;

	/* Otherwise we set up specific rctl / type masks for this HBQ */
	for (i = 0; i < hbq_desc->mask_count; i++) {
		hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
		hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
		hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
		hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
	}

	return;
}

void
lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
{
	int i;
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));

	mb->un.varCfgRing.ring = ring;
	mb->un.varCfgRing.maxOrigXchg = 0;
	mb->un.varCfgRing.maxRespXchg = 0;
	mb->un.varCfgRing.recvNotify = 1;

	psli = &phba->sli;
	pring = &psli->ring[ring];
	mb->un.varCfgRing.numMask = pring->num_mask;
	mb->mbxCommand = MBX_CONFIG_RING;
	mb->mbxOwner = OWN_HOST;

	if (pring->prt[0].profile) {
		mb->un.varCfgRing.profile = pring->prt[0].profile;
		return;
	}

	for (i = 0; i < pring->num_mask; i++) {
		mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
		if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
			mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
		else
			mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
		mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
		mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
	}

	return;
}

/* Build a CONFIG_PORT mailbox command and initialise the Port Control
 * Block: PCB, mailbox, host-group and port-group DMA addresses, SLI-3
 * feature bits (HBQs, NPIV, BlockGuard) and, for LightConnect HBAs, the
 * HBA initialisation words.
 */
void
lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
	MAILBOX_t *mb = &pmb->u.mb;
	dma_addr_t pdma_addr;
	uint32_t bar_low, bar_high;
	size_t offset;
	struct lpfc_hgp hgp;
	int i;
	uint32_t pgp_offset;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;

	mb->un.varCfgPort.pcbLen = sizeof(PCB_t);

	offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
	mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);

	/* Always Host Group Pointer is in SLIM */
	mb->un.varCfgPort.hps = 1;

	/* If HBA supports SLI=3 ask for it */
	if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
		if (phba->cfg_enable_bg)
			mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
		mb->un.varCfgPort.cdss = 1; /* Configure Security */
		mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
		mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
		mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
		if (phba->max_vpi && phba->cfg_enable_npiv &&
		    phba->vpd.sli3Feat.cmv) {
			mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
			mb->un.varCfgPort.cmv = 1;
		} else
			mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
	} else
		phba->sli_rev = LPFC_SLI_REV2;
	mb->un.varCfgPort.sli_mode = phba->sli_rev;

	/* Now set up the PCB */
	phba->pcb->type = TYPE_NATIVE_SLI2;
	phba->pcb->feature = FEATURE_INITIAL_SLI2;

	/* Set up mailbox pointers */
	phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
	offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
	pdma_addr = phba->slim2p.phys + offset;
	phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);

	/*
	 * Set up the host group ring pointers.  They live either in host
	 * memory (SLIM2) or in the adapter's SLIM; read the memory BARs so
	 * that the absolute SLIM address can be programmed into the PCB
	 * when needed.
	 */
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
	pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);

	if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
		/* Host group pointers kept in host memory (SLIM2) */
		phba->host_gp = &phba->mbox->us.s2.host[0];
		phba->hbq_put = NULL;
		offset = (uint8_t *)&phba->mbox->us.s2.host -
			 (uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
		phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
	} else {
		/* Always Host Group Pointer is in SLIM */
		mb->un.varCfgPort.hps = 1;

		if (phba->sli_rev == 3) {
			phba->host_gp = &mb_slim->us.s3.host[0];
			phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
		} else {
			phba->host_gp = &mb_slim->us.s2.host[0];
			phba->hbq_put = NULL;
		}

		/* mask off BAR0's flag bits 0 - 3 */
		phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
			(void __iomem *)phba->host_gp -
			(void __iomem *)phba->MBslimaddr;
		if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
			phba->pcb->hgpAddrHigh = bar_high;
		else
			phba->pcb->hgpAddrHigh = 0;
		/* write zeroed HGP data to SLIM for each ring */
		memset(&hgp, 0, sizeof(struct lpfc_hgp));

		for (i = 0; i < phba->sli.num_rings; i++) {
			lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
				    sizeof(*phba->host_gp));
		}
	}

	/* Set up Port Group offset */
	if (phba->sli_rev == 3)
		pgp_offset = offsetof(struct lpfc_sli2_slim,
				      mbx.us.s3_pgp.port);
	else
		pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
	pdma_addr = phba->slim2p.phys + pgp_offset;
	phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
	phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);

	/* Use helper routine to set up the rings in the PCB */
	lpfc_config_pcb_setup(phba);

	/* special handling for LC HBAs */
	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		uint32_t hbainit[5];

		lpfc_hba_init(phba, hbainit);

		memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
	}

	/* Swap PCB if needed */
	lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
}

void
lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_KILL_BOARD;
	mb->mbxOwner = OWN_HOST;
	return;
}

/* Queue a mailbox command onto the driver's internal mailbox queue.  Must
 * be called with the hbalock held.
 */
void
lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
{
	struct lpfc_sli *psli;

	psli = &phba->sli;

	list_add_tail(&mbq->list, &psli->mboxq);

	psli->mboxq_cnt++;

	return;
}

/* Remove and return the next queued mailbox command, or NULL if the queue
 * is empty.  Must be called with the hbalock held.
 */
LPFC_MBOXQ_t *
lpfc_mbox_get(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *mbq = NULL;
	struct lpfc_sli *psli = &phba->sli;

	list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
	if (mbq)
		psli->mboxq_cnt--;

	return mbq;
}

/* Put a completed mailbox command onto the completion queue; lock-free
 * variant, the caller must hold the hbalock.
 */
void
__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
}

/* Locked wrapper around __lpfc_mbox_cmpl_put(). */
void
lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	unsigned long iflag;

	/* This function expects to be called from interrupt context */
	spin_lock_irqsave(&phba->hbalock, iflag);
	__lpfc_mbox_cmpl_put(phba, mbq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}

int
lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	/* Mailbox commands with a completion handler other than the default
	 * ones must also carry a vport.
	 */
	if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!mboxq->vport) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
					"1814 Mbox x%x failed, no vport\n",
					mboxq->u.mb.mbxCommand);
			dump_stack();
			return -ENODEV;
		}
	}
	return 0;
}

int
lpfc_mbox_dev_check(struct lpfc_hba *phba)
{
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -ENODEV;

	if (phba->link_state == LPFC_HBA_ERROR)
		return -ENODEV;

	return 0;
}

int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
{
	switch (cmd) {
	case MBX_WRITE_NV:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_AREA:
	case MBX_WRITE_WWN:
	case MBX_LOAD_EXP_ROM:
		return LPFC_MBOX_TMO_FLASH_CMD;
	case MBX_SLI4_CONFIG:
		return LPFC_MBOX_SLI4_CONFIG_TMO;
	}
	return LPFC_MBOX_TMO;
}
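
/* The values returned above are timeouts in seconds.  A rough sketch of
 * how a caller might use them when issuing a command in polled mode
 * (assumed usage, not a definitive example):
 *
 *	timeout = lpfc_mbox_tmo_val(phba, mbox->u.mb.mbxCommand);
 *	rc = lpfc_sli_issue_mbox_wait(phba, mbox, timeout);
 */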

void
lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
		      dma_addr_t phyaddr, uint32_t length)
{
	struct lpfc_mbx_nembed_cmd *nembed_sge;

	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
			&mbox->u.mqe.un.nembed_cmd;
	nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
	nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
	nembed_sge->sge[sgentry].length = length;
}

void
lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
		      struct lpfc_mbx_sge *sge)
{
	struct lpfc_mbx_nembed_cmd *nembed_sge;

	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
			&mbox->u.mqe.un.nembed_cmd;
	sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
	sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
	sge->length = nembed_sge->sge[sgentry].length;
}

/* Free an SLI4_CONFIG mailbox command: for non-embedded commands the
 * externally allocated SGE pages are released before the mailbox itself
 * is returned to the memory pool.
 */
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_sli4_config *sli4_cfg;
	struct lpfc_mbx_sge sge;
	dma_addr_t phyaddr;
	uint32_t sgecount, sgentry;

	sli4_cfg = &mbox->u.mqe.un.sli4_config;

	/* For an embedded mbox command, just free the mbox command */
	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	/* For a non-embedded mbox command, the pages must be freed first */
	sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
	/* There is nothing to do if there is no SGE address array */
	if (unlikely(!mbox->sge_array)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}
	/* Each non-embedded DMA buffer was allocated as a full page */
	for (sgentry = 0; sgentry < sgecount; sgentry++) {
		lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
		phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  mbox->sge_array->addr[sgentry], phyaddr);
	}
	/* Free the SGE address array, then the mailbox command itself */
	kfree(mbox->sge_array);
	mempool_free(mbox, phba->mbox_mem_pool);
}

/* Build an SLI4_CONFIG mailbox command for the given @subsystem/@opcode.
 * If @emb, the payload is embedded in the mailbox; otherwise page-sized
 * DMA buffers are allocated and described by SGEs.  Returns the payload
 * length actually made available.
 */
int
lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
{
	struct lpfc_mbx_sli4_config *sli4_config;
	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
	uint32_t alloc_len;
	uint32_t resid_len;
	uint32_t pagen, pcount;
	void *viraddr;
	dma_addr_t phyaddr;

	/* Set up SLI4 mailbox command header fields */
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);

	/* Set up SLI4 config command header fields */
	sli4_config = &mbox->u.mqe.un.sli4_config;

	/* Setup for the embedded mbox command */
	if (emb) {
		/* Set up main header fields */
		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
		sli4_config->header.cfg_mhdr.payload_length =
					LPFC_MBX_CMD_HDR_LENGTH + length;
		/* Set up sub-header fields following the main header */
		bf_set(lpfc_mbox_hdr_opcode,
			&sli4_config->header.cfg_shdr.request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem,
			&sli4_config->header.cfg_shdr.request, subsystem);
		sli4_config->header.cfg_shdr.request.request_length = length;
		return length;
	}

	/* Setup for the non-embedded mbox command */
	pcount = (PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
	/* Allocate record for keeping SGE virtual addresses */
	mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
				  GFP_KERNEL);
	if (!mbox->sge_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2527 Failed to allocate non-embedded SGE "
				"array.\n");
		return 0;
	}
	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
		/* The DMA memory is always allocated in the length of a
		 * page even though the last SGE might not fill up to a
		 * page; SLI4_PAGE_SIZE is used later as the size for the
		 * DMA memory free.
		 */
		viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
					     &phyaddr, GFP_KERNEL);
		/* In case the allocation fails, proceed with whatever we have */
		if (!viraddr)
			break;
		memset(viraddr, 0, SLI4_PAGE_SIZE);
		mbox->sge_array->addr[pagen] = viraddr;
		/* Keep the first page for later sub-header construction */
		if (pagen == 0)
			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
		resid_len = length - alloc_len;
		if (resid_len > SLI4_PAGE_SIZE) {
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      SLI4_PAGE_SIZE);
			alloc_len += SLI4_PAGE_SIZE;
		} else {
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      resid_len);
			alloc_len = length;
		}
	}

	/* Set up main header fields in the mailbox command */
	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);

	/* Set up sub-header fields in the first page */
	if (pagen > 0) {
		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
		cfg_shdr->request.request_length =
				alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
	}
	/* The sub-header is in DMA memory, which needs endian conversion */
	if (cfg_shdr)
		lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
				      sizeof(union lpfc_sli4_cfg_shdr));

	return alloc_len;
}
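
/* Rough pairing for non-embedded SLI4_CONFIG commands (a sketch under the
 * assumption that the caller fills the payload through mbox->sge_array):
 *
 *	alloc_len = lpfc_sli4_config(phba, mbox, subsystem, opcode,
 *				     req_len, LPFC_SLI4_MBX_NEMBED);
 *	if (alloc_len < req_len)
 *		goto fail;
 *	...
 *	lpfc_sli4_mbox_cmd_free(phba, mbox);	frees the SGE pages + mbox
 */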

uint8_t
lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_sli4_config *sli4_cfg;
	union lpfc_sli4_cfg_shdr *cfg_shdr;

	if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
		return 0;
	sli4_cfg = &mbox->u.mqe.un.sli4_config;

	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
		cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
		return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
	}

	if (unlikely(!mbox->sge_array))
		return 0;
	cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
	return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
}

/* Construct a non-embedded READ_FCF_TABLE SLI4_CONFIG command that reads
 * the FCF record at @fcf_index.  Returns 0 on success or -ENOMEM if the
 * non-embedded payload could not be fully allocated.
 */
int
lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
			   struct lpfcMboxq *mboxq,
			   uint16_t fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	uint8_t *bytep;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;

	if (!mboxq)
		return -ENOMEM;

	req_len = sizeof(struct fcf_record) +
		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);

	/* Set up READ_FCF SLI4_CONFIG mailbox-queue command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
			LPFC_SLI4_MBX_NEMBED);

	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"0291 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory.  This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	virt_addr = mboxq->sge_array->addr[0];
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;

	/* Set up command fields */
	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
	/* Perform necessary endian conversion */
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));

	return 0;
}

void
lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
{
	/* Set up SLI4 mailbox command header fields */
	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
	bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);

	/* Always request the FCP initiator feature */
	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);

	/* Request DIF (BlockGuard) only when it is enabled */
	if (phba->cfg_enable_bg)
		bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);

	/* Request NPIV when the HBA and configuration support it */
	if (phba->max_vpi && phba->cfg_enable_npiv)
		bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);

	return;
}

void
lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
{
	struct lpfc_mbx_init_vfi *init_vfi;

	memset(mbox, 0, sizeof(*mbox));
	mbox->vport = vport;
	init_vfi = &mbox->u.mqe.un.init_vfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
	bf_set(lpfc_init_vfi_vr, init_vfi, 1);
	bf_set(lpfc_init_vfi_vt, init_vfi, 1);
	bf_set(lpfc_init_vfi_vp, init_vfi, 1);
	bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
	bf_set(lpfc_init_vpi_vpi, init_vfi, vport->vpi + vport->phba->vpi_base);
	bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
}

void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
	struct lpfc_mbx_reg_vfi *reg_vfi;

	memset(mbox, 0, sizeof(*mbox));
	reg_vfi = &mbox->u.mqe.un.reg_vfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
	bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
	bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
	reg_vfi->e_d_tov = vport->phba->fc_edtov;
	reg_vfi->r_a_tov = vport->phba->fc_ratov;
	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
	reg_vfi->bde.addrLow = putPaddrLow(phys);
	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
}

void
lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
{
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
	bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
	       vpi + phba->vpi_base);
	bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
	       phba->pport->vfi + phba->vfi_base);
}

void
lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
{
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
	bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
	       vport->vfi + vport->phba->vfi_base);
}

/* Build a DUMP_MEMORY mailbox command that reads the FCoE parameter record
 * from config region 23.  A DMA buffer is allocated and saved in
 * mbox->context1; returns 1 on allocation failure.
 */
int
lpfc_dump_fcoe_param(struct lpfc_hba *phba,
		     struct lpfcMboxq *mbox)
{
	struct lpfc_dmabuf *mp = NULL;
	MAILBOX_t *mb;

	memset(mbox, 0, sizeof(*mbox));
	mb = &mbox->u.mb;

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	if (!mp || !mp->virt) {
		kfree(mp);
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
			"2569 lpfc_dump_fcoe_param: memory"
			" allocation failed\n");
		return 1;
	}

	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* save address for completion */
	mbox->context1 = (uint8_t *) mp;

	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.region_id = DMP_REGION_23;
	mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	return 0;
}

/* Build a REG_FCFI mailbox command to register the driver's current FCF
 * record with the firmware and bind it to the header receive queue.
 */
void
lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_reg_fcfi *reg_fcfi;

	memset(mbox, 0, sizeof(*mbox));
	reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
	bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
	bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
	bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
	       phba->fcf.current_rec.fcf_indx);
	/* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
	bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
	if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
		bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
		bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
		       phba->fcf.current_rec.vlan_id);
	}
}

/* Build an UNREG_FCFI mailbox command for FCF index @fcfi. */
void
lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
{
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
	bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
}

void
lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
{
	struct lpfc_mbx_resume_rpi *resume_rpi;

	memset(mbox, 0, sizeof(*mbox));
	resume_rpi = &mbox->u.mqe.un.resume_rpi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
	bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
	bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
	resume_rpi->event_tag = ndlp->phba->fc_eventTag;
}

void
lpfc_supported_pages(struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_supp_pages *supp_pages;

	memset(mbox, 0, sizeof(*mbox));
	supp_pages = &mbox->u.mqe.un.supp_pages;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
	bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
}

void
lpfc_sli4_params(struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_sli4_params *sli4_params;

	memset(mbox, 0, sizeof(*mbox));
	sli4_params = &mbox->u.mqe.un.sli4_params;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
	bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
}