/*
 * lpfc_mbox.c: Mailbox command construction and management routines for
 * the Emulex LightPulse Fibre Channel (lpfc) host bus adapter driver.
 */

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_compat.h"

/**
 * lpfc_dump_static_vport - Dump HBA's static vport information
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @offset: offset into the static vport region.
 *
 * Prepares a DUMP_MEMORY mailbox command that retrieves the list of static
 * vports to be created. On SLI-4 HBAs a DMA buffer is allocated to receive
 * the data; on earlier SLI revisions the response is embedded in the mailbox.
 *
 * Returns 0 on success, 1 if the DMA buffer could not be allocated.
 **/
int
lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
		       uint16_t offset)
{
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;

	mb = &pmb->u.mb;

	/* Set up to dump the static vport information region */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = DMP_REGION_VPORT;
	mb->mbxOwner = OWN_HOST;

	/* For SLI-3 HBAs the data is returned embedded in the mailbox */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		mb->un.varDmp.cv = 1;
		mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
		return 0;
	}

	/* For SLI-4 HBAs the driver must supply a DMA buffer */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	if (!mp || !mp->virt) {
		kfree(mp);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2605 lpfc_dump_static_vport: memory"
				" allocation failed\n");
		return 1;
	}
	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* save address for completion */
	pmb->ctx_buf = (uint8_t *)mp;
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);

	return 0;
}
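
/*
 * Illustrative caller sketch (not part of the driver): the mailbox is
 * expected to come from phba->mbox_mem_pool; lpfc_dump_static_vport()
 * attaches the DMA buffer as pmb->ctx_buf, and the completion handler is
 * responsible for releasing it. The completion handler name below is
 * hypothetical.
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (pmb && !lpfc_dump_static_vport(phba, pmb, 0)) {
 *		pmb->vport = phba->pport;
 *		pmb->mbox_cmpl = my_static_vport_cmpl;	// hypothetical
 *		if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) ==
 *		    MBX_NOT_FINISHED)
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *	}
 */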

/**
 * lpfc_down_link - Build a mailbox command to bring down the HBA's link
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 */
void
lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb = &pmb->u.mb;
	mb->mbxCommand = MBX_DOWN_LINK;
	mb->mbxOwner = OWN_HOST;
}

/**
 * lpfc_dump_mem - Prepare a dump memory mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @offset: offset into the region to dump.
 * @region_id: configuration region id to dump.
 *
 * Sets up MBX_DUMP_MEMORY to read a configuration region from non-volatile
 * memory, preserving the context buffer already attached to @pmb.
 */
void
lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
	      uint16_t region_id)
{
	MAILBOX_t *mb;
	void *ctx;

	mb = &pmb->u.mb;
	ctx = pmb->ctx_buf;

	/* Set up to dump the requested memory region */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.cv = 1;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = region_id;
	mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof(uint32_t));
	mb->un.varDmp.co = 0;
	mb->un.varDmp.resp_offset = 0;
	pmb->ctx_buf = ctx;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_dump_wakeup_param - Prepare a mailbox command to dump wakeup params
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Sets up MBX_DUMP_MEMORY to retrieve the HBA's wakeup parameter (boot
 * configuration) region.
 */
void
lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	void *ctx;

	mb = &pmb->u.mb;
	/* Save context so it can be restored after the memset */
	ctx = pmb->ctx_buf;

	/* Set up to dump the wakeup parameter region */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->mbxOwner = OWN_HOST;
	mb->un.varDmp.cv = 1;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	if (phba->sli_rev < LPFC_SLI_REV4)
		mb->un.varDmp.entry_index = 0;
	mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
	mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
	mb->un.varDmp.co = 0;
	mb->un.varDmp.resp_offset = 0;
	pmb->ctx_buf = ctx;
	return;
}

/**
 * lpfc_read_nv - Prepare a mailbox command to read the HBA's NVRAM params
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * READ_NV reads the adapter's non-volatile parameters, which include the
 * WWNN and WWPN of the HBA.
 */
void
lpfc_read_nv(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_READ_NV;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_config_async - Prepare a mailbox command to enable async events
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @ring: ring number on which asynchronous events are to be posted.
 *
 * Enables delivery of firmware asynchronous event notifications on the
 * specified IOCB ring.
 */
void
lpfc_config_async(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
		  uint32_t ring)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
	mb->un.varCfgAsyncEvent.ring = ring;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_heart_beat - Prepare a heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * The heart-beat command is issued periodically to detect an unresponsive
 * HBA; the firmware is expected to complete it promptly.
 */
void
lpfc_heart_beat(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
	mb->mbxCommand = MBX_HEARTBEAT;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_read_topology - Prepare a mailbox command to read link topology
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @mp: DMA buffer used to receive the loop (AL_PA) map.
 *
 * READ_TOPOLOGY reads the current link topology and attention information.
 * The supplied DMA buffer is attached to @pmb as ctx_buf and must be
 * released by the completion handler.
 *
 * Always returns 0.
 */
int
lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
		   struct lpfc_dmabuf *mp)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	INIT_LIST_HEAD(&mp->list);
	mb->mbxCommand = MBX_READ_TOPOLOGY;
	mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
	mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);

	/* Save the buffer address for completion and mark the mailbox as
	 * owned by the host so the firmware knows it may process it.
	 */
	pmb->ctx_buf = (uint8_t *)mp;
	mb->mbxOwner = OWN_HOST;
	return 0;
}

/**
 * lpfc_clear_la - Prepare a mailbox command to clear link attention
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * CLEAR_LA clears the pending link attention condition; the current link
 * event tag is carried in the command.
 */
void
lpfc_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	mb->un.varClearLA.eventTag = phba->fc_eventTag;
	mb->mbxCommand = MBX_CLEAR_LA;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_config_link - Prepare a mailbox command to configure link parameters
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * CONFIG_LINK passes the driver's timeout values (E_D_TOV, R_A_TOV, etc.),
 * coalescing settings and, when supported, BB credit recovery parameters
 * down to the HBA.
 */
void
lpfc_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = phba->pport;
	MAILBOX_t *mb = &pmb->u.mb;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	/* Coalescing Response support is only available prior to SLI-4 */
	if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
		mb->un.varCfgLnk.cr = 1;
		mb->un.varCfgLnk.ci = 1;
		mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
		mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
	}

	mb->un.varCfgLnk.myId = vport->fc_myDID;
	mb->un.varCfgLnk.edtov = phba->fc_edtov;
	mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
	mb->un.varCfgLnk.ratov = phba->fc_ratov;
	mb->un.varCfgLnk.rttov = phba->fc_rttov;
	mb->un.varCfgLnk.altov = phba->fc_altov;
	mb->un.varCfgLnk.crtov = phba->fc_crtov;
	mb->un.varCfgLnk.cscn = 0;
	if (phba->bbcredit_support && phba->cfg_enable_bbcr) {
		mb->un.varCfgLnk.cscn = 1;
		mb->un.varCfgLnk.bbscn = bf_get(lpfc_bbscn_def,
						&phba->sli4_hba.bbscn_params);
	}

	if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
		mb->un.varCfgLnk.ack0_enable = 1;

	mb->mbxCommand = MBX_CONFIG_LINK;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_config_msi - Prepare a mailbox command for configuring MSI-X
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * CONFIG_MSI maps host attention conditions to MSI-X message numbers so
 * that ring-0/ring-1 events are delivered on message 1 and everything else
 * on message 0.
 *
 * Returns 0 on success, -EINVAL if the HBA is not configured for MSI-X or
 * does not support SLI-3 or later.
 */
int
lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t attentionConditions[2];

	/* Sanity check */
	if (phba->cfg_use_msi != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0475 Not configured for supporting MSI-X "
				"cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
		return -EINVAL;
	}

	if (phba->sli_rev < 3) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0476 HBA not supporting SLI-3 or later "
				"SLI Revision: 0x%x\n", phba->sli_rev);
		return -EINVAL;
	}

	/* Clear mailbox command fields */
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	/*
	 * SLI-3, Message Signaled Interrupt feature: multi-message
	 * attention configuration.
	 */
	attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
				  HA_LATT | HA_MBATT);
	attentionConditions[1] = 0;

	mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
	mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];

	/*
	 * Set up message number to HA bit association
	 */
#ifdef __BIG_ENDIAN_BITFIELD
	/* RA0 (FCP Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
	/* RA1 (Other Protocol Extra Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
#else
	/* RA0 (FCP Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
	/* RA1 (Other Protocol Extra Ring) */
	mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
#endif
	/* Multi-message interrupt autoclear configuration */
	mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
	mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];

	/* HBA autoclear is intentionally disabled below */
	mb->un.varCfgMSI.autoClearHA[0] = 0;
	mb->un.varCfgMSI.autoClearHA[1] = 0;

	/* Set command and owner bit */
	mb->mbxCommand = MBX_CONFIG_MSI;
	mb->mbxOwner = OWN_HOST;

	return 0;
}
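
/*
 * Note on the messageNumberByHA indexing above: the array is addressed as
 * bytes within 32-bit words laid out for the HBA's (big-endian) view. On a
 * little-endian host the byte order within each word is reversed, so the
 * index is XOR'ed with 3 to land on the byte the hardware expects; for
 * example, within one 4-byte word an index of 0 becomes 3, 1 becomes 2,
 * and so on.
 */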

/**
 * lpfc_init_link - Prepare a mailbox command to initialize the link
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @topology: requested topology (loop, point-to-point, or a failover mix).
 * @linkspeed: requested user link speed.
 *
 * INIT_LINK brings up the FC link with the requested topology and speed.
 * Loop topologies are rewritten to point-to-point on adapters that do not
 * support loop (G6 family or IF_TYPE_6 without persistent loop support).
 */
void
lpfc_init_link(struct lpfc_hba *phba,
	       LPFC_MBOXQ_t *pmb, uint32_t topology, uint32_t linkspeed)
{
	lpfc_vpd_t *vpd;
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	switch (topology) {
	case FLAGS_TOPOLOGY_MODE_LOOP_PT:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
		break;
	case FLAGS_TOPOLOGY_MODE_PT_PT:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		break;
	case FLAGS_TOPOLOGY_MODE_LOOP:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
		break;
	case FLAGS_TOPOLOGY_MODE_PT_LOOP:
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
		break;
	case FLAGS_LOCAL_LB:
		mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		break;
	}

	/* Loop topology is not allowed on these adapters */
	if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 ||
	     phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
	    !(phba->sli4_hba.pc_sli4_params.pls) &&
	    mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
		phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
	}

	/* Complete aborts immediately unless configured to wait for ABTS responses */
	if (phba->sli_rev == LPFC_SLI_REV3 && !phba->cfg_fcp_wait_abts_rsp)
		mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;

	/* Set the requested link speed if the adapter supports it */
	vpd = &phba->vpd;
	if (vpd->rev.feaLevelHigh >= 0x02) {
		switch (linkspeed) {
		case LPFC_USER_LINK_SPEED_1G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
			break;
		case LPFC_USER_LINK_SPEED_2G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
			break;
		case LPFC_USER_LINK_SPEED_4G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
			break;
		case LPFC_USER_LINK_SPEED_8G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
			break;
		case LPFC_USER_LINK_SPEED_10G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
			break;
		case LPFC_USER_LINK_SPEED_16G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
			break;
		case LPFC_USER_LINK_SPEED_32G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_32G;
			break;
		case LPFC_USER_LINK_SPEED_64G:
			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
			mb->un.varInitLnk.link_speed = LINK_SPEED_64G;
			break;
		case LPFC_USER_LINK_SPEED_AUTO:
		default:
			mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
			break;
		}
	} else {
		mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
	}

	mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
	mb->mbxOwner = OWN_HOST;
	mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
	return;
}

/**
 * lpfc_read_sparam - Prepare a mailbox command to read service parameters
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @vpi: virtual N_Port identifier.
 *
 * READ_SPARAM64 retrieves the port's service parameters into a DMA buffer
 * that is attached to @pmb as ctx_buf; the completion handler is expected
 * to free it.
 *
 * Returns 0 on success, 1 if the DMA buffer could not be allocated.
 */
int
lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
{
	struct lpfc_dmabuf *mp;
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	mb->mbxOwner = OWN_HOST;

	/* Get a buffer to hold the HBA's service parameters */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		kfree(mp);
		mb->mbxCommand = MBX_READ_SPARM64;
		/* READ_SPARAM: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0301 READ_SPARAM: no buffers\n");
		return 1;
	}
	INIT_LIST_HEAD(&mp->list);
	mb->mbxCommand = MBX_READ_SPARM64;
	mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof(struct serv_parm);
	mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];

	/* save address for completion */
	pmb->ctx_buf = mp;

	return 0;
}
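
/*
 * Illustrative caller sketch (not part of the driver): the mailbox normally
 * comes from phba->mbox_mem_pool and is issued with MBX_NOWAIT; the
 * completion routine copies the service parameters out of ctx_buf and then
 * frees the lpfc_dmabuf with lpfc_mbuf_free() and kfree(). The completion
 * handler name below is hypothetical.
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (pmb && !lpfc_read_sparam(phba, pmb, vport->vpi)) {
 *		pmb->vport = vport;
 *		pmb->mbox_cmpl = my_read_sparam_cmpl;	// hypothetical
 *		if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) ==
 *		    MBX_NOT_FINISHED)
 *			mempool_free(pmb, phba->mbox_mem_pool);
 *	}
 */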

/**
 * lpfc_unreg_did - Prepare a mailbox command to unregister a DID
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier (0xffff for the physical port).
 * @did: remote port DID to unregister.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * UNREG_D_ID unregisters the login(s) associated with the given DID.
 */
void
lpfc_unreg_did(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
	       LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	mb->un.varUnregDID.did = did;
	mb->un.varUnregDID.vpi = vpi;
	if ((vpi != 0xffff) &&
	    (phba->sli_rev == LPFC_SLI_REV4))
		mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];

	mb->mbxCommand = MBX_UNREG_D_ID;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_read_config - Prepare a mailbox command to read HBA configuration
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * READ_CONFIG reports the HBA's current configuration and resource limits.
 */
void
lpfc_read_config(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	mb->mbxCommand = MBX_READ_CONFIG;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_read_lnk_stat - Prepare a mailbox command to read link statistics
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * READ_LNK_STAT returns the link error counters maintained by the HBA.
 */
void
lpfc_read_lnk_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	mb->mbxCommand = MBX_READ_LNK_STAT;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_reg_rpi - Prepare a mailbox command to register a remote port login
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @did: remote port DID.
 * @param: pointer to the remote port's service parameters.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 * @rpi: the rpi to use (SLI-4 only).
 *
 * REG_LOGIN64 registers a login with the HBA so that later exchanges with
 * the remote port can reference it by RPI. A copy of the service parameters
 * is placed in a DMA buffer that is attached to @pmb as ctx_buf and must be
 * released by the completion handler.
 *
 * Returns 0 on success, 1 if the DMA buffer could not be allocated.
 */
int
lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
	     uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
{
	MAILBOX_t *mb = &pmb->u.mb;
	uint8_t *sparam;
	struct lpfc_dmabuf *mp;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	mb->un.varRegLogin.rpi = 0;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
	mb->un.varRegLogin.did = did;
	mb->mbxOwner = OWN_HOST;

	/* Get a buffer to hold the NPort's service parameters */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp || !mp->virt) {
		kfree(mp);
		mb->mbxCommand = MBX_REG_LOGIN64;
		/* REG_LOGIN: no buffers */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
				"rpi x%x\n", vpi, did, rpi);
		return 1;
	}
	INIT_LIST_HEAD(&mp->list);
	sparam = mp->virt;

	/* Copy the service parameters into the new buffer */
	memcpy(sparam, param, sizeof(struct serv_parm));

	/* save address for completion */
	pmb->ctx_buf = (uint8_t *)mp;

	mb->mbxCommand = MBX_REG_LOGIN64;
	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof(struct serv_parm);
	mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);

	return 0;
}

/**
 * lpfc_unreg_login - Prepare a mailbox command to unregister an RPI
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier.
 * @rpi: remote port identifier to unregister.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * UNREG_LOGIN releases a login previously registered with lpfc_reg_rpi().
 */
void
lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
		 LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;

	mb = &pmb->u.mb;
	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	mb->un.varUnregLogin.rpi = rpi;
	mb->un.varUnregLogin.rsvd1 = 0;
	if (phba->sli_rev >= LPFC_SLI_REV3)
		mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];

	mb->mbxCommand = MBX_UNREG_LOGIN;
	mb->mbxOwner = OWN_HOST;

	return;
}

/**
 * lpfc_sli4_unreg_all_rpis - Unregister all RPIs of a vport (SLI-4)
 * @vport: pointer to the vport whose logins are to be unregistered.
 *
 * Issues an UNREG_LOGIN with the "unreg all" indicator so that the firmware
 * releases every RPI registered on this vport in a single command.
 */
void
lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		/*
		 * For SLI-4 functions the rpi field is overloaded with the
		 * vpi, and setting 0x4000 in rsvd1 tells the firmware to
		 * unregister all logins on that vpi.
		 */
		lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
				 mbox);
		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->ctx_ndlp = NULL;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			mempool_free(mbox, phba->mbox_mem_pool);
	}
}

/**
 * lpfc_reg_vpi - Prepare a mailbox command to register a vport identifier
 * @vport: pointer to the vport to register.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * REG_VPI activates (or, on SLI-4, updates) the virtual N_Port so that it
 * can log in to the fabric and carry I/O.
 */
void
lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_hba *phba = vport->phba;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	/* Set the re-reg VPI bit when updating an existing registration */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
		mb->un.varRegVpi.upd = 1;

	mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
	mb->un.varRegVpi.sid = vport->fc_myDID;
	if (phba->sli_rev == LPFC_SLI_REV4)
		mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
	else
		mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
	memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
	       sizeof(struct lpfc_name));
	mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
	mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);

	mb->mbxCommand = MBX_REG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_unreg_vpi - Prepare a mailbox command to unregister a vport id
 * @phba: pointer to lpfc hba data structure.
 * @vpi: virtual N_Port identifier to unregister.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * UNREG_VPI tears down the vport registration established by REG_VPI.
 */
void
lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;

	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));

	if (phba->sli_rev == LPFC_SLI_REV3)
		mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
	else if (phba->sli_rev >= LPFC_SLI_REV4)
		mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];

	mb->mbxCommand = MBX_UNREG_VPI;
	mb->mbxOwner = OWN_HOST;
	return;
}

/**
 * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
 * @phba: pointer to lpfc hba data structure.
 *
 * Sizes each SLI-3 ring's command and response IOCB entries and records
 * their DMA addresses (relative to the SLIM2 region) in the PCB, which is
 * later handed to the HBA by CONFIG_PORT.
 */
static void
lpfc_config_pcb_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	PCB_t *pcbp = phba->pcb;
	dma_addr_t pdma_addr;
	uint32_t offset;
	uint32_t iocbCnt = 0;
	int i;

	pcbp->maxRing = (psli->num_rings - 1);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];

		pring->sli.sli3.sizeCiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
					     SLI2_IOCB_CMD_SIZE;
		pring->sli.sli3.sizeRiocb =
			phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
					     SLI2_IOCB_RSP_SIZE;
		/* A ring MUST have both cmd and rsp entries defined to be
		 * valid.
		 */
		if ((pring->sli.sli3.numCiocb == 0) ||
		    (pring->sli.sli3.numRiocb == 0)) {
			pcbp->rdsc[i].cmdEntries = 0;
			pcbp->rdsc[i].rspEntries = 0;
			pcbp->rdsc[i].cmdAddrHigh = 0;
			pcbp->rdsc[i].rspAddrHigh = 0;
			pcbp->rdsc[i].cmdAddrLow = 0;
			pcbp->rdsc[i].rspAddrLow = 0;
			pring->sli.sli3.cmdringaddr = NULL;
			pring->sli.sli3.rspringaddr = NULL;
			continue;
		}
		/* Command ring setup for ring */
		pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
		pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;

		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
			 (uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numCiocb;

		/* Response ring setup for ring */
		pring->sli.sli3.rspringaddr = (void *)&phba->IOCBs[iocbCnt];

		pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
		offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
			 (uint8_t *)phba->slim2p.virt;
		pdma_addr = phba->slim2p.phys + offset;
		pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
		pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
		iocbCnt += pring->sli.sli3.numRiocb;
	}
}
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029void
1030lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1031{
1032 MAILBOX_t *mb = &pmb->u.mb;
1033 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1034 mb->un.varRdRev.cv = 1;
1035 mb->un.varRdRev.v3req = 1;
1036 mb->mbxCommand = MBX_READ_REV;
1037 mb->mbxOwner = OWN_HOST;
1038 return;
1039}
1040
1041void
1042lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1043{
1044 MAILBOX_t *mb = &pmb->u.mb;
1045 struct lpfc_mqe *mqe;
1046
1047 switch (mb->mbxCommand) {
1048 case MBX_READ_REV:
1049 mqe = &pmb->u.mqe;
1050 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
1051 mqe->un.read_rev.fw_name, 16);
1052 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
1053 mqe->un.read_rev.ulp_fw_name, 16);
1054 break;
1055 default:
1056 break;
1057 }
1058 return;
1059}
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071static void
1072lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
1073 struct lpfc_hbq_init *hbq_desc)
1074{
1075 hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
1076 hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
1077 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
1078}
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090static void
1091lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
1092 struct lpfc_hbq_init *hbq_desc)
1093{
1094 hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
1095 hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
1096 hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
1097 hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
1098 memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
1099 sizeof(hbqmb->profiles.profile3.cmdmatch));
1100}
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113static void
1114lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
1115 struct lpfc_hbq_init *hbq_desc)
1116{
1117 hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
1118 hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
1119 hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
1120 hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
1121 memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
1122 sizeof(hbqmb->profiles.profile5.cmdmatch));
1123}
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139void
1140lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
1141 struct lpfc_hbq_init *hbq_desc,
1142 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
1143{
1144 int i;
1145 MAILBOX_t *mb = &pmb->u.mb;
1146 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
1147
1148 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1149 hbqmb->hbqId = id;
1150 hbqmb->entry_count = hbq_desc->entry_count;
1151 hbqmb->recvNotify = hbq_desc->rn;
1152
1153 hbqmb->numMask = hbq_desc->mask_count;
1154
1155 hbqmb->profile = hbq_desc->profile;
1156
1157
1158 hbqmb->ringMask = hbq_desc->ring_mask;
1159
1160
1161 hbqmb->headerLen = hbq_desc->headerLen;
1162
1163 hbqmb->logEntry = hbq_desc->logEntry;
1164
1165
1166
1167 hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
1168 hbq_entry_index * sizeof(struct lpfc_hbq_entry);
1169 hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
1170
1171 mb->mbxCommand = MBX_CONFIG_HBQ;
1172 mb->mbxOwner = OWN_HOST;
1173
1174
1175
1176
1177 if (hbq_desc->profile == 2)
1178 lpfc_build_hbq_profile2(hbqmb, hbq_desc);
1179 else if (hbq_desc->profile == 3)
1180 lpfc_build_hbq_profile3(hbqmb, hbq_desc);
1181 else if (hbq_desc->profile == 5)
1182 lpfc_build_hbq_profile5(hbqmb, hbq_desc);
1183
1184
1185 if (!hbq_desc->mask_count)
1186 return;
1187
1188
1189 for (i = 0; i < hbq_desc->mask_count; i++) {
1190 hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
1191 hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
1192 hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
1193 hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
1194 }
1195
1196 return;
1197}
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216void
1217lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1218{
1219 int i;
1220 MAILBOX_t *mb = &pmb->u.mb;
1221 struct lpfc_sli *psli;
1222 struct lpfc_sli_ring *pring;
1223
1224 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1225
1226 mb->un.varCfgRing.ring = ring;
1227 mb->un.varCfgRing.maxOrigXchg = 0;
1228 mb->un.varCfgRing.maxRespXchg = 0;
1229 mb->un.varCfgRing.recvNotify = 1;
1230
1231 psli = &phba->sli;
1232 pring = &psli->sli3_ring[ring];
1233 mb->un.varCfgRing.numMask = pring->num_mask;
1234 mb->mbxCommand = MBX_CONFIG_RING;
1235 mb->mbxOwner = OWN_HOST;
1236
1237
1238 if (pring->prt[0].profile) {
1239 mb->un.varCfgRing.profile = pring->prt[0].profile;
1240 return;
1241 }
1242
1243
1244 for (i = 0; i < pring->num_mask; i++) {
1245 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
1246 if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
1247 mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
1248 else
1249 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
1250 mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
1251 mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
1252 }
1253
1254 return;
1255}
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271void
1272lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1273{
1274 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1275 MAILBOX_t *mb = &pmb->u.mb;
1276 dma_addr_t pdma_addr;
1277 uint32_t bar_low, bar_high;
1278 size_t offset;
1279 struct lpfc_hgp hgp;
1280 int i;
1281 uint32_t pgp_offset;
1282
1283 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1284 mb->mbxCommand = MBX_CONFIG_PORT;
1285 mb->mbxOwner = OWN_HOST;
1286
1287 mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
1288
1289 offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
1290 pdma_addr = phba->slim2p.phys + offset;
1291 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
1292 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
1293
1294
1295 mb->un.varCfgPort.hps = 1;
1296
1297
1298
1299 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1300 if (phba->cfg_enable_bg)
1301 mb->un.varCfgPort.cbg = 1;
1302 mb->un.varCfgPort.cerbm = 1;
1303 mb->un.varCfgPort.ccrp = 1;
1304 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1305 if (phba->max_vpi && phba->cfg_enable_npiv &&
1306 phba->vpd.sli3Feat.cmv) {
1307 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1308 mb->un.varCfgPort.cmv = 1;
1309 } else
1310 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1311 } else
1312 phba->sli_rev = LPFC_SLI_REV2;
1313 mb->un.varCfgPort.sli_mode = phba->sli_rev;
1314
1315
1316 if (phba->sli_rev == LPFC_SLI_REV3)
1317 mb->un.varCfgPort.casabt = 1;
1318
1319
1320 phba->pcb->type = TYPE_NATIVE_SLI2;
1321 phba->pcb->feature = FEATURE_INITIAL_SLI2;
1322
1323
1324 phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
1325 offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
1326 pdma_addr = phba->slim2p.phys + offset;
1327 phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
1328 phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
1350 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380 if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
1381 phba->host_gp = (struct lpfc_hgp __iomem *)
1382 &phba->mbox->us.s2.host[0];
1383 phba->hbq_put = NULL;
1384 offset = (uint8_t *)&phba->mbox->us.s2.host -
1385 (uint8_t *)phba->slim2p.virt;
1386 pdma_addr = phba->slim2p.phys + offset;
1387 phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
1388 phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
1389 } else {
1390
1391 mb->un.varCfgPort.hps = 1;
1392
1393 if (phba->sli_rev == 3) {
1394 phba->host_gp = &mb_slim->us.s3.host[0];
1395 phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
1396 } else {
1397 phba->host_gp = &mb_slim->us.s2.host[0];
1398 phba->hbq_put = NULL;
1399 }
1400
1401
1402 phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
1403 (void __iomem *)phba->host_gp -
1404 (void __iomem *)phba->MBslimaddr;
1405 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
1406 phba->pcb->hgpAddrHigh = bar_high;
1407 else
1408 phba->pcb->hgpAddrHigh = 0;
1409
1410 memset(&hgp, 0, sizeof(struct lpfc_hgp));
1411
1412 for (i = 0; i < phba->sli.num_rings; i++) {
1413 lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
1414 sizeof(*phba->host_gp));
1415 }
1416 }
1417
1418
1419 if (phba->sli_rev == 3)
1420 pgp_offset = offsetof(struct lpfc_sli2_slim,
1421 mbx.us.s3_pgp.port);
1422 else
1423 pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
1424 pdma_addr = phba->slim2p.phys + pgp_offset;
1425 phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
1426 phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
1427
1428
1429 lpfc_config_pcb_setup(phba);
1430
1431
1432 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
1433 uint32_t hbainit[5];
1434
1435 lpfc_hba_init(phba, hbainit);
1436
1437 memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
1438 }
1439
1440
1441 lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
1442}
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459void
1460lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1461{
1462 MAILBOX_t *mb = &pmb->u.mb;
1463
1464 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1465 mb->mbxCommand = MBX_KILL_BOARD;
1466 mb->mbxOwner = OWN_HOST;
1467 return;
1468}
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480void
1481lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
1482{
1483 struct lpfc_sli *psli;
1484
1485 psli = &phba->sli;
1486
1487 list_add_tail(&mbq->list, &psli->mboxq);
1488
1489 psli->mboxq_cnt++;
1490
1491 return;
1492}
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508LPFC_MBOXQ_t *
1509lpfc_mbox_get(struct lpfc_hba * phba)
1510{
1511 LPFC_MBOXQ_t *mbq = NULL;
1512 struct lpfc_sli *psli = &phba->sli;
1513
1514 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
1515 if (mbq)
1516 psli->mboxq_cnt--;
1517
1518 return mbq;
1519}
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531void
1532__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1533{
1534 list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
1535}
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547void
1548lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
1549{
1550 unsigned long iflag;
1551
1552
1553 spin_lock_irqsave(&phba->hbalock, iflag);
1554 __lpfc_mbox_cmpl_put(phba, mbq);
1555 spin_unlock_irqrestore(&phba->hbalock, iflag);
1556 return;
1557}
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570int
1571lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1572{
1573
1574
1575
1576 if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
1577 mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
1578 if (!mboxq->vport) {
1579 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
1580 "1814 Mbox x%x failed, no vport\n",
1581 mboxq->u.mb.mbxCommand);
1582 dump_stack();
1583 return -ENODEV;
1584 }
1585 }
1586 return 0;
1587}
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599int
1600lpfc_mbox_dev_check(struct lpfc_hba *phba)
1601{
1602
1603 if (unlikely(pci_channel_offline(phba->pcidev)))
1604 return -ENODEV;
1605
1606
1607 if (phba->link_state == LPFC_HBA_ERROR)
1608 return -ENODEV;
1609
1610 return 0;
1611}
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624int
1625lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1626{
1627 MAILBOX_t *mbox = &mboxq->u.mb;
1628 uint8_t subsys, opcode;
1629
1630 switch (mbox->mbxCommand) {
1631 case MBX_WRITE_NV:
1632 case MBX_DUMP_MEMORY:
1633 case MBX_UPDATE_CFG:
1634 case MBX_DOWN_LOAD:
1635 case MBX_DEL_LD_ENTRY:
1636 case MBX_WRITE_VPARMS:
1637 case MBX_LOAD_AREA:
1638 case MBX_WRITE_WWN:
1639 case MBX_LOAD_EXP_ROM:
1640 case MBX_ACCESS_VDATA:
1641 return LPFC_MBOX_TMO_FLASH_CMD;
1642 case MBX_SLI4_CONFIG:
1643 subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
1644 opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
1645 if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
1646 switch (opcode) {
1647 case LPFC_MBOX_OPCODE_READ_OBJECT:
1648 case LPFC_MBOX_OPCODE_WRITE_OBJECT:
1649 case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
1650 case LPFC_MBOX_OPCODE_DELETE_OBJECT:
1651 case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
1652 case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
1653 case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
1654 case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
1655 case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
1656 case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
1657 case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
1658 case LPFC_MBOX_OPCODE_RESET_LICENSES:
1659 case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
1660 case LPFC_MBOX_OPCODE_GET_VPD_DATA:
1661 case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
1662 return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
1663 }
1664 }
1665 if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
1666 switch (opcode) {
1667 case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
1668 return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
1669 }
1670 }
1671 return LPFC_MBOX_SLI4_CONFIG_TMO;
1672 }
1673 return LPFC_MBOX_TMO;
1674}
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686void
1687lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
1688 dma_addr_t phyaddr, uint32_t length)
1689{
1690 struct lpfc_mbx_nembed_cmd *nembed_sge;
1691
1692 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1693 &mbox->u.mqe.un.nembed_cmd;
1694 nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
1695 nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
1696 nembed_sge->sge[sgentry].length = length;
1697}
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708void
1709lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
1710 struct lpfc_mbx_sge *sge)
1711{
1712 struct lpfc_mbx_nembed_cmd *nembed_sge;
1713
1714 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
1715 &mbox->u.mqe.un.nembed_cmd;
1716 sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
1717 sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
1718 sge->length = nembed_sge->sge[sgentry].length;
1719}
1720
1721
1722
1723
1724
1725
1726
1727
1728void
1729lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1730{
1731 struct lpfc_mbx_sli4_config *sli4_cfg;
1732 struct lpfc_mbx_sge sge;
1733 dma_addr_t phyaddr;
1734 uint32_t sgecount, sgentry;
1735
1736 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1737
1738
1739 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1740 mempool_free(mbox, phba->mbox_mem_pool);
1741 return;
1742 }
1743
1744
1745 sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
1746
1747 if (unlikely(!mbox->sge_array)) {
1748 mempool_free(mbox, phba->mbox_mem_pool);
1749 return;
1750 }
1751
1752 for (sgentry = 0; sgentry < sgecount; sgentry++) {
1753 lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
1754 phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
1755 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
1756 mbox->sge_array->addr[sgentry], phyaddr);
1757 }
1758
1759 kfree(mbox->sge_array);
1760
1761 mempool_free(mbox, phba->mbox_mem_pool);
1762}
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779int
1780lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1781 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
1782{
1783 struct lpfc_mbx_sli4_config *sli4_config;
1784 union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
1785 uint32_t alloc_len;
1786 uint32_t resid_len;
1787 uint32_t pagen, pcount;
1788 void *viraddr;
1789 dma_addr_t phyaddr;
1790
1791
1792 memset(mbox, 0, sizeof(*mbox));
1793 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
1794
1795
1796 sli4_config = &mbox->u.mqe.un.sli4_config;
1797
1798
1799 if (emb) {
1800
1801 bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
1802 sli4_config->header.cfg_mhdr.payload_length = length;
1803
1804 bf_set(lpfc_mbox_hdr_opcode,
1805 &sli4_config->header.cfg_shdr.request, opcode);
1806 bf_set(lpfc_mbox_hdr_subsystem,
1807 &sli4_config->header.cfg_shdr.request, subsystem);
1808 sli4_config->header.cfg_shdr.request.request_length =
1809 length - LPFC_MBX_CMD_HDR_LENGTH;
1810 return length;
1811 }
1812
1813
1814 pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
1815 pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
1816 LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
1817
1818 mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
1819 GFP_KERNEL);
1820 if (!mbox->sge_array) {
1821 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1822 "2527 Failed to allocate non-embedded SGE "
1823 "array.\n");
1824 return 0;
1825 }
1826 for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
1827
1828
1829
1830
1831
1832 viraddr = dma_alloc_coherent(&phba->pcidev->dev,
1833 SLI4_PAGE_SIZE, &phyaddr,
1834 GFP_KERNEL);
1835
1836 if (!viraddr)
1837 break;
1838 mbox->sge_array->addr[pagen] = viraddr;
1839
1840 if (pagen == 0)
1841 cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
1842 resid_len = length - alloc_len;
1843 if (resid_len > SLI4_PAGE_SIZE) {
1844 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1845 SLI4_PAGE_SIZE);
1846 alloc_len += SLI4_PAGE_SIZE;
1847 } else {
1848 lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
1849 resid_len);
1850 alloc_len = length;
1851 }
1852 }
1853
1854
1855 sli4_config->header.cfg_mhdr.payload_length = alloc_len;
1856 bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
1857
1858
1859 if (pagen > 0) {
1860 bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
1861 bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
1862 cfg_shdr->request.request_length =
1863 alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
1864 }
1865
1866 if (cfg_shdr)
1867 lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
1868 sizeof(union lpfc_sli4_cfg_shdr));
1869 return alloc_len;
1870}
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887int
1888lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1889 uint16_t exts_count, uint16_t rsrc_type, bool emb)
1890{
1891 uint8_t opcode = 0;
1892 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
1893 void *virtaddr = NULL;
1894
1895
1896 if (emb == LPFC_SLI4_MBX_NEMBED) {
1897
1898 virtaddr = mbox->sge_array->addr[0];
1899 if (virtaddr == NULL)
1900 return 1;
1901 n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
1902 }
1903
1904
1905
1906
1907
1908 if (emb == LPFC_SLI4_MBX_EMBED)
1909 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1910 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1911 rsrc_type);
1912 else {
1913
1914 bf_set(lpfc_mbx_alloc_rsrc_extents_type,
1915 n_rsrc_extnt, rsrc_type);
1916 lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
1917 &n_rsrc_extnt->word4,
1918 sizeof(uint32_t));
1919 }
1920
1921
1922 opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
1923 switch (opcode) {
1924 case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
1925 if (emb == LPFC_SLI4_MBX_EMBED)
1926 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1927 &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
1928 exts_count);
1929 else
1930 bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
1931 n_rsrc_extnt, exts_count);
1932 break;
1933 case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
1934 case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
1935 case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
1936
1937 break;
1938 default:
1939 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1940 "2929 Resource Extent Opcode x%x is "
1941 "unsupported\n", opcode);
1942 return 1;
1943 }
1944
1945 return 0;
1946}
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958uint8_t
1959lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1960{
1961 struct lpfc_mbx_sli4_config *sli4_cfg;
1962 union lpfc_sli4_cfg_shdr *cfg_shdr;
1963
1964 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1965 return LPFC_MBOX_SUBSYSTEM_NA;
1966 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1967
1968
1969 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1970 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1971 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1972 }
1973
1974
1975 if (unlikely(!mbox->sge_array))
1976 return LPFC_MBOX_SUBSYSTEM_NA;
1977 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1978 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1979}
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991uint8_t
1992lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1993{
1994 struct lpfc_mbx_sli4_config *sli4_cfg;
1995 union lpfc_sli4_cfg_shdr *cfg_shdr;
1996
1997 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1998 return LPFC_MBOX_OPCODE_NA;
1999 sli4_cfg = &mbox->u.mqe.un.sli4_config;
2000
2001
2002 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
2003 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
2004 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
2005 }
2006
2007
2008 if (unlikely(!mbox->sge_array))
2009 return LPFC_MBOX_OPCODE_NA;
2010 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
2011 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
2012}
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026int
2027lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
2028 struct lpfcMboxq *mboxq,
2029 uint16_t fcf_index)
2030{
2031 void *virt_addr;
2032 uint8_t *bytep;
2033 struct lpfc_mbx_sge sge;
2034 uint32_t alloc_len, req_len;
2035 struct lpfc_mbx_read_fcf_tbl *read_fcf;
2036
2037 if (!mboxq)
2038 return -ENOMEM;
2039
2040 req_len = sizeof(struct fcf_record) +
2041 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
2042
2043
2044 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2045 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
2046 LPFC_SLI4_MBX_NEMBED);
2047
2048 if (alloc_len < req_len) {
2049 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
2050 "0291 Allocated DMA memory size (x%x) is "
2051 "less than the requested DMA memory "
2052 "size (x%x)\n", alloc_len, req_len);
2053 return -ENOMEM;
2054 }
2055
2056
2057
2058
2059 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
2060 virt_addr = mboxq->sge_array->addr[0];
2061 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
2062
2063
2064 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
2065
2066 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
2067 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
2068
2069 return 0;
2070}
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080void
2081lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
2082{
2083
2084 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
2085 bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
2086
2087
2088 bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
2089 bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
2090
2091
2092 if (phba->cfg_enable_bg)
2093 bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
2094
2095
2096 if (phba->max_vpi && phba->cfg_enable_npiv)
2097 bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
2098
2099 if (phba->nvmet_support) {
2100 bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);
2101
2102 bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0);
2103 bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0);
2104 }
2105
2106
2107 if (phba->cfg_vmid_app_header) {
2108 bf_set(lpfc_mbx_rq_ftr_rq_ashdr, &mboxq->u.mqe.un.req_ftrs, 1);
2109 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 1);
2110 }
2111 return;
2112}
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125void
2126lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2127{
2128 struct lpfc_mbx_init_vfi *init_vfi;
2129
2130 memset(mbox, 0, sizeof(*mbox));
2131 mbox->vport = vport;
2132 init_vfi = &mbox->u.mqe.un.init_vfi;
2133 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
2134 bf_set(lpfc_init_vfi_vr, init_vfi, 1);
2135 bf_set(lpfc_init_vfi_vt, init_vfi, 1);
2136 bf_set(lpfc_init_vfi_vp, init_vfi, 1);
2137 bf_set(lpfc_init_vfi_vfi, init_vfi,
2138 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2139 bf_set(lpfc_init_vfi_vpi, init_vfi,
2140 vport->phba->vpi_ids[vport->vpi]);
2141 bf_set(lpfc_init_vfi_fcfi, init_vfi,
2142 vport->phba->fcf.fcfi);
2143}
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156void
2157lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
2158{
2159 struct lpfc_mbx_reg_vfi *reg_vfi;
2160 struct lpfc_hba *phba = vport->phba;
2161 uint8_t bbscn_fabric = 0, bbscn_max = 0, bbscn_def = 0;
2162
2163 memset(mbox, 0, sizeof(*mbox));
2164 reg_vfi = &mbox->u.mqe.un.reg_vfi;
2165 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
2166 bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
2167 bf_set(lpfc_reg_vfi_vfi, reg_vfi,
2168 phba->sli4_hba.vfi_ids[vport->vfi]);
2169 bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
2170 bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
2171 memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
2172 reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
2173 reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
2174 reg_vfi->e_d_tov = phba->fc_edtov;
2175 reg_vfi->r_a_tov = phba->fc_ratov;
2176 if (phys) {
2177 reg_vfi->bde.addrHigh = putPaddrHigh(phys);
2178 reg_vfi->bde.addrLow = putPaddrLow(phys);
2179 reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
2180 reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2181 }
2182 bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
2183
2184
2185 if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
2186 (vport->fc_flag & FC_VFI_REGISTERED) &&
2187 (!phba->fc_topology_changed))
2188 bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
2189
2190 bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 0);
2191 bf_set(lpfc_reg_vfi_bbscn, reg_vfi, 0);
2192 bbscn_fabric = (phba->fc_fabparam.cmn.bbRcvSizeMsb >> 4) & 0xF;
2193
2194 if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
2195 bbscn_fabric != 0) {
2196 bbscn_max = bf_get(lpfc_bbscn_max,
2197 &phba->sli4_hba.bbscn_params);
2198 if (bbscn_fabric <= bbscn_max) {
2199 bbscn_def = bf_get(lpfc_bbscn_def,
2200 &phba->sli4_hba.bbscn_params);
2201
2202 if (bbscn_fabric > bbscn_def)
2203 bf_set(lpfc_reg_vfi_bbscn, reg_vfi,
2204 bbscn_fabric);
2205 else
2206 bf_set(lpfc_reg_vfi_bbscn, reg_vfi, bbscn_def);
2207
2208 bf_set(lpfc_reg_vfi_bbcr, reg_vfi, 1);
2209 }
2210 }
2211 lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
2212 "3134 Register VFI, mydid:x%x, fcfi:%d, "
2213 " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x"
2214 " port_state:x%x topology chg:%d bbscn_fabric :%d\n",
2215 vport->fc_myDID,
2216 phba->fcf.fcfi,
2217 phba->sli4_hba.vfi_ids[vport->vfi],
2218 phba->vpi_ids[vport->vpi],
2219 reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag,
2220 vport->port_state, phba->fc_topology_changed,
2221 bbscn_fabric);
2222}
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236void
2237lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
2238{
2239 memset(mbox, 0, sizeof(*mbox));
2240 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
2241 bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
2242 phba->vpi_ids[vpi]);
2243 bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
2244 phba->sli4_hba.vfi_ids[phba->pport->vfi]);
2245}
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258void
2259lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
2260{
2261 memset(mbox, 0, sizeof(*mbox));
2262 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
2263 bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
2264 vport->phba->sli4_hba.vfi_ids[vport->vfi]);
2265}
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275int
2276lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2277{
2278 struct lpfc_dmabuf *mp = NULL;
2279 MAILBOX_t *mb;
2280
2281 memset(mbox, 0, sizeof(*mbox));
2282 mb = &mbox->u.mb;
2283
2284 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2285 if (mp)
2286 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2287
2288 if (!mp || !mp->virt) {
2289 kfree(mp);
2290
2291 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2292 "2569 lpfc dump config region 23: memory"
2293 " allocation failed\n");
2294 return 1;
2295 }
2296
2297 memset(mp->virt, 0, LPFC_BPL_SIZE);
2298 INIT_LIST_HEAD(&mp->list);
2299
2300
2301 mbox->ctx_buf = (uint8_t *)mp;
2302
2303 mb->mbxCommand = MBX_DUMP_MEMORY;
2304 mb->un.varDmp.type = DMP_NV_PARAMS;
2305 mb->un.varDmp.region_id = DMP_REGION_23;
2306 mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
2307 mb->un.varWords[3] = putPaddrLow(mp->phys);
2308 mb->un.varWords[4] = putPaddrHigh(mp->phys);
2309 return 0;
2310}
2311
2312static void
2313lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2314{
2315 MAILBOX_t *mb;
2316 int rc = FAILURE;
2317 struct lpfc_rdp_context *rdp_context =
2318 (struct lpfc_rdp_context *)(mboxq->ctx_ndlp);
2319
2320 mb = &mboxq->u.mb;
2321 if (mb->mbxStatus)
2322 goto mbx_failed;
2323
2324 memcpy(&rdp_context->link_stat, &mb->un.varRdLnk, sizeof(READ_LNK_VAR));
2325
2326 rc = SUCCESS;
2327
2328mbx_failed:
2329 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2330 rdp_context->cmpl(phba, rdp_context, rc);
2331}
2332
2333static void
2334lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2335{
2336 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
2337 struct lpfc_rdp_context *rdp_context =
2338 (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
2339
2340 if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
2341 goto error_mbuf_free;
2342
2343 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2,
2344 DMP_SFF_PAGE_A2_SIZE);
2345
2346
2347 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2348 kfree(mp);
2349
2350 memset(mbox, 0, sizeof(*mbox));
2351 lpfc_read_lnk_stat(phba, mbox);
2352 mbox->vport = rdp_context->ndlp->vport;
2353 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_link_stat;
2354 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
2355 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
2356 goto error_cmd_free;
2357
2358 return;
2359
2360error_mbuf_free:
2361 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2362 kfree(mp);
2363error_cmd_free:
2364 lpfc_sli4_mbox_cmd_free(phba, mbox);
2365 rdp_context->cmpl(phba, rdp_context, FAILURE);
2366}
2367
2368void
2369lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2370{
2371 int rc;
2372 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(mbox->ctx_buf);
2373 struct lpfc_rdp_context *rdp_context =
2374 (struct lpfc_rdp_context *)(mbox->ctx_ndlp);
2375
2376 if (bf_get(lpfc_mqe_status, &mbox->u.mqe))
2377 goto error;
2378
2379 lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0,
2380 DMP_SFF_PAGE_A0_SIZE);
2381
2382 memset(mbox, 0, sizeof(*mbox));
2383
2384 memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE);
2385 INIT_LIST_HEAD(&mp->list);
2386
2387
2388 mbox->ctx_buf = mp;
2389 mbox->vport = rdp_context->ndlp->vport;
2390
2391 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
2392 bf_set(lpfc_mbx_memory_dump_type3_type,
2393 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
2394 bf_set(lpfc_mbx_memory_dump_type3_link,
2395 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
2396 bf_set(lpfc_mbx_memory_dump_type3_page_no,
2397 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2);
2398 bf_set(lpfc_mbx_memory_dump_type3_length,
2399 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE);
2400 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
2401 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
2402
2403 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a2;
2404 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
2405 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
2406 if (rc == MBX_NOT_FINISHED)
2407 goto error;
2408
2409 return;
2410
2411error:
2412 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2413 kfree(mp);
2414 lpfc_sli4_mbox_cmd_free(phba, mbox);
2415 rdp_context->cmpl(phba, rdp_context, FAILURE);
2416}
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427int
2428lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2429{
2430 struct lpfc_dmabuf *mp = NULL;
2431
2432 memset(mbox, 0, sizeof(*mbox));
2433
2434 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2435 if (mp)
2436 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
2437 if (!mp || !mp->virt) {
2438 kfree(mp);
2439 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2440 "3569 dump type 3 page 0xA0 allocation failed\n");
2441 return 1;
2442 }
2443
2444 memset(mp->virt, 0, LPFC_BPL_SIZE);
2445 INIT_LIST_HEAD(&mp->list);
2446
2447 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY);
2448
2449 mbox->ctx_buf = mp;
2450
2451 bf_set(lpfc_mbx_memory_dump_type3_type,
2452 &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD);
2453 bf_set(lpfc_mbx_memory_dump_type3_link,
2454 &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port);
2455 bf_set(lpfc_mbx_memory_dump_type3_page_no,
2456 &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A0);
2457 bf_set(lpfc_mbx_memory_dump_type3_length,
2458 &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE);
2459 mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys);
2460 mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys);
2461
2462 return 0;
2463}
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478void
2479lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2480{
2481 struct lpfc_mbx_reg_fcfi *reg_fcfi;
2482
2483 memset(mbox, 0, sizeof(*mbox));
2484 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
2485 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
2486 if (phba->nvmet_support == 0) {
2487 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
2488 phba->sli4_hba.hdr_rq->queue_id);
2489
2490 bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0);
2491 bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0);
2492 bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0);
2493 bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0);
2494
2495 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
2496
2497
2498 bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
2499 (~phba->fcf.addr_mode) & 0x3);
2500 } else {
2501
2502 if (phba->cfg_nvmet_mrq != 1)
2503 return;
2504
2505 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
2506 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
2507
2508 bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP);
2509 bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff);
2510 bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi,
2511 FC_RCTL_DD_UNSOL_CMD);
2512
2513 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi,
2514 phba->sli4_hba.hdr_rq->queue_id);
2515
2516 bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0);
2517 bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0);
2518 bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0);
2519 bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0);
2520 }
2521 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
2522 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
2523 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
2524 phba->fcf.current_rec.fcf_indx);
2525 if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
2526 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
2527 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2528 phba->fcf.current_rec.vlan_id);
2529 }
2530}
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546void
2547lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode)
2548{
2549 struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi;
2550
2551
2552 if (phba->cfg_nvmet_mrq <= 1)
2553 return;
2554
2555 memset(mbox, 0, sizeof(*mbox));
2556 reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq;
2557 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ);
2558 if (mode == 0) {
2559 bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi,
2560 phba->fcf.current_rec.fcf_indx);
2561 if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
2562 bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1);
2563 bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi,
2564 phba->fcf.current_rec.vlan_id);
2565 }
2566 return;
2567 }
2568
2569 bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi,
2570 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
2571
2572 bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP);
2573 bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff);
2574 bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD);
2575 bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff);
2576 bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1);
2577 bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1);
2578
2579 bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3);
2580 bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1);
2581 bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1);
2582 bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq);
2583
2584 bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi,
2585 phba->sli4_hba.hdr_rq->queue_id);
2586
2587 bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0);
2588 bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0);
2589 bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0);
2590 bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0);
2591
2592 bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
2593 bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
2594}
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604void
2605lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2606{
2607 memset(mbox, 0, sizeof(*mbox));
2608 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
2609 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
2610}
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620void
2621lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2622{
2623 struct lpfc_hba *phba = ndlp->phba;
2624 struct lpfc_mbx_resume_rpi *resume_rpi;
2625
2626 memset(mbox, 0, sizeof(*mbox));
2627 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2628 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2629 bf_set(lpfc_resume_rpi_index, resume_rpi,
2630 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2631 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2632 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2633}
2634
2635