1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include "bfad_drv.h"
20#include "bfa_modules.h"
21#include "bfi_reg.h"
22
23BFA_TRC_FILE(HAL, CORE);
24
25
26
27
/*
 * BFA submodule table.  bfa_iocfc_start_submod() and
 * bfa_iocfc_disable_submod() walk this NULL-terminated list in order,
 * so entry ordering is significant.
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_fcdiag,
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	&hal_mod_dconf,
	NULL
};
40
41
42
43
/*
 * Response-queue message dispatch table, indexed by the BFI_MC_*
 * message class of each incoming message (see bfa_isr_rspq()).
 * Positional initialization: entry order must track the BFI_MC_* enum,
 * with bfa_isr_unhandled() filling every class not handled here.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_fcdiag_intr,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_fcport_isr,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_uf_isr,
	bfa_fcxp_isr,
	bfa_lps_isr,
	bfa_rport_isr,
	bfa_itn_isr,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_ioim_isr,
	bfa_ioim_good_comp_isr,
	bfa_tskim_isr,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
	bfa_isr_unhandled,
};
78
79
80
/*
 * Mailbox message handlers, indexed by message class; only the IOCFC
 * class is handled here, all other slots are NULL (entries beyond the
 * explicit initializers are zero-filled).
 * NOTE(review): registration of this table with the IOC mailbox layer
 * is not visible in this file chunk -- verify against the IOC code.
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	bfa_iocfc_isr,
	NULL,
};
91
92
93
/*
 * Append one 64-bit trace record to the circular trace buffer.
 * Records the file number, line number, data word and a timestamp at
 * the current tail, then advances tail; when tail catches up with
 * head the oldest record is overwritten (head advances too).
 * Records are silently dropped once tracing has been stopped.
 */
void
__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
{
	int tail = trcm->tail;
	struct bfa_trc_s *trc = &trcm->trc[tail];

	/* Tracing frozen (see bfa_trc_stop()); drop the record. */
	if (trcm->stopped)
		return;

	trc->fileno = (u16) fileno;
	trc->line = (u16) line;
	trc->data.u64 = data;
	trc->timestamp = BFA_TRC_TS(trcm);

	/* Wrap with a mask -- assumes BFA_TRC_MAX is a power of two. */
	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
	if (trcm->tail == trcm->head)
		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
}
112
113static void
114bfa_com_port_attach(struct bfa_s *bfa)
115{
116 struct bfa_port_s *port = &bfa->modules.port;
117 struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
118
119 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
120 bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
121}
122
123
124
125
126static void
127bfa_com_ablk_attach(struct bfa_s *bfa)
128{
129 struct bfa_ablk_s *ablk = &bfa->modules.ablk;
130 struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
131
132 bfa_ablk_attach(ablk, &bfa->ioc);
133 bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
134}
135
136static void
137bfa_com_cee_attach(struct bfa_s *bfa)
138{
139 struct bfa_cee_s *cee = &bfa->modules.cee;
140 struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
141
142 cee->trcmod = bfa->trcmod;
143 bfa_cee_attach(cee, &bfa->ioc, bfa);
144 bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
145}
146
147static void
148bfa_com_sfp_attach(struct bfa_s *bfa)
149{
150 struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
151 struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
152
153 bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
154 bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
155}
156
157static void
158bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
159{
160 struct bfa_flash_s *flash = BFA_FLASH(bfa);
161 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
162
163 bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
164 bfa_flash_memclaim(flash, flash_dma->kva_curp,
165 flash_dma->dma_curp, mincfg);
166}
167
168static void
169bfa_com_diag_attach(struct bfa_s *bfa)
170{
171 struct bfa_diag_s *diag = BFA_DIAG_MOD(bfa);
172 struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
173
174 bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
175 bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
176}
177
178static void
179bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
180{
181 struct bfa_phy_s *phy = BFA_PHY(bfa);
182 struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
183
184 bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
185 bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
186}
187
188static void
189bfa_com_fru_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
190{
191 struct bfa_fru_s *fru = BFA_FRU(bfa);
192 struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);
193
194 bfa_fru_attach(fru, &bfa->ioc, bfa, bfa->trcmod, mincfg);
195 bfa_fru_memclaim(fru, fru_dma->kva_curp, fru_dma->dma_curp, mincfg);
196}
197
198
199
200
201
202
203
204
/* IOCFC operation timeout, in milliseconds. */
#define BFA_IOCFC_TOV 5000

/* Driver-requested IOCFC actions. */
enum {
	BFA_IOCFC_ACT_NONE = 0,
	BFA_IOCFC_ACT_INIT = 1,
	BFA_IOCFC_ACT_STOP = 2,
	BFA_IOCFC_ACT_DISABLE = 3,
	BFA_IOCFC_ACT_ENABLE = 4,
};

/* Default resource counts used when building the driver configuration. */
#define DEF_CFG_NUM_FABRICS 1
#define DEF_CFG_NUM_LPORTS 256
#define DEF_CFG_NUM_CQS 4
#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS 128
#define DEF_CFG_NUM_FCXP_REQS 64
#define DEF_CFG_NUM_UF_BUFS 64
#define DEF_CFG_NUM_RPORTS 1024
#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS 256

#define DEF_CFG_NUM_SGPGS 2048
#define DEF_CFG_NUM_REQQ_ELEMS 256
#define DEF_CFG_NUM_RSPQ_ELEMS 64
#define DEF_CFG_NUM_SBOOT_TGTS 16
#define DEF_CFG_NUM_SBOOT_LUNS 16
231
232
233
234
/*
 * IOCFC state machine states.  Each declaration generates prototypes
 * for a bfa_iocfc_sm_<state>_entry() action and a bfa_iocfc_sm_<state>()
 * event handler, both defined below.
 */
bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, operational,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_write,
		   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_failed,
		   struct bfa_iocfc_s, enum iocfc_event);
254
255
256
257
/* Forward declarations for IOCFC internals and completion callbacks. */
static void bfa_iocfc_start_submod(struct bfa_s *bfa);
static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
static void bfa_iocfc_send_cfg(void *bfa_arg);
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);
270
/* IOCFC state: stopped -- idle; nothing to do on entry. */
static void
bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
{
}

/* stopped: INIT or ENABLE starts IOC bring-up via the initing state. */
static void
bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_INIT:
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
291
/* IOCFC state: initing -- kick off IOC enable; wait for the result. */
static void
bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

/*
 * initing: on IOC enable move to dconf_read; DISABLE/STOP abort the
 * bring-up; an IOC failure here is an init failure.
 */
static void
bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
324
/* IOCFC state: dconf_read -- start reading driver config from flash. */
static void
bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modinit(iocfc->bfa);
}

/* dconf_read: once dconf is loaded, send the IOCFC config to firmware. */
static void
bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
357
/* IOCFC state: init_cfg_wait -- config request sent during init. */
static void
bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

/* init_cfg_wait: wait for the firmware's config response. */
static void
bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
390
/*
 * IOCFC state: init_cfg_done -- init succeeded; queue the init
 * completion callback to the driver and wait for START.
 */
static void
bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
{
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

/* init_cfg_done: START moves to operational; late IOC failure to failed. */
static void
bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_START:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
		break;
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
422
/*
 * IOCFC state: operational -- normal running state; initialize the FC
 * port and start all submodules on entry.
 */
static void
bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_fcport_init(iocfc->bfa);
	bfa_iocfc_start_submod(iocfc->bfa);
}

/* operational: STOP flushes dconf first; failure goes to failed. */
static void
bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
450
/*
 * IOCFC state: dconf_write -- flush driver config to flash before
 * stopping (bfa_dconf_modexit() triggers the writeback).
 */
static void
bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_dconf_modexit(iocfc->bfa);
}

/* dconf_write: proceed to stopping once the flush completes or IOC fails. */
static void
bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
472
/* IOCFC state: stopping -- disable the IOC and wait for it to go down. */
static void
bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

/*
 * stopping: on IOC disable, quiesce interrupts and submodules, enter
 * stopped, and queue the stop completion to the driver.  Stale
 * enable/dconf/cfg completions that race in are ignored.
 */
static void
bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_isr_disable(iocfc->bfa);
		bfa_iocfc_disable_submod(iocfc->bfa);
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
			     bfa_iocfc_stop_cb, iocfc->bfa);
		break;

	case IOCFC_E_IOC_ENABLED:
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_CFG_DONE:
		break;

	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
504
/* IOCFC state: enabling -- re-enable the IOC after a disable. */
static void
bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_enable(&iocfc->bfa->ioc);
}

/*
 * enabling: on IOC enable re-send the config (cfg_wait); on failure,
 * report it to the driver via the enable callback if one was requested
 * (cb_reqd), exactly once.
 */
static void
bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;

	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;

	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);

		/* No enable callback outstanding -- nothing to report. */
		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
545
/* IOCFC state: cfg_wait -- config request sent during (re-)enable. */
static void
bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_iocfc_send_cfg(iocfc->bfa);
}

/*
 * cfg_wait: on config completion go operational and, if the driver
 * asked for an enable callback (cb_reqd), deliver it with the final
 * status; an IOC failure delivers the same callback with failure.
 */
static void
bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_CFG_DONE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;

	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_IOC_FAILED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
			break;

		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
			     bfa_iocfc_enable_cb, iocfc->bfa);
		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
590
/* IOCFC state: disabling -- request IOC disable, wait for completion. */
static void
bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_ioc_disable(&iocfc->bfa->ioc);
}

/* disabling: ignore stale completions; IOC_DISABLED finishes the disable. */
static void
bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
		break;
	case IOCFC_E_IOC_ENABLED:
	case IOCFC_E_DCONF_DONE:
	case IOCFC_E_CFG_DONE:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
615
/*
 * IOCFC state: disabled -- quiesce interrupts/submodules and queue the
 * disable completion callback to the driver.
 */
static void
bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
		     bfa_iocfc_disable_cb, iocfc->bfa);
}

/* disabled: STOP flushes dconf; ENABLE re-enables the IOC. */
static void
bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_ENABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
643
/*
 * IOCFC state: failed -- IOC failed after init completed; quiesce
 * interrupts and submodules and wait for recovery or teardown.
 */
static void
bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	bfa_iocfc_disable_submod(iocfc->bfa);
}

/* failed: IOC recovery (IOC_ENABLED) re-sends config via cfg_wait. */
static void
bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
		break;
	case IOCFC_E_DISABLE:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
		break;
	case IOCFC_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
673
/*
 * IOCFC state: init_failed -- IOC failed during initial bring-up;
 * report the init failure to the driver.
 */
static void
bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
{
	bfa_isr_disable(iocfc->bfa);
	iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
		     bfa_iocfc_init_cb, iocfc->bfa);
}

/*
 * init_failed: recovery (IOC_ENABLED) restarts from dconf_read; a
 * disable completes via the disable callback and returns to stopped.
 */
static void
bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
	bfa_trc(iocfc->bfa, event);

	switch (event) {
	case IOCFC_E_STOP:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
		break;
	case IOCFC_E_DISABLE:
		bfa_ioc_disable(&iocfc->bfa->ioc);
		break;
	case IOCFC_E_IOC_ENABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
		break;
	case IOCFC_E_IOC_DISABLED:
		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
			     bfa_iocfc_disable_cb, iocfc->bfa);
		break;
	case IOCFC_E_IOC_FAILED:
		break;
	default:
		bfa_sm_fault(iocfc->bfa, event);
		break;
	}
}
711
712
713
714
/*
 * Resume pending requests waiting on request queue @qid: pop waiters
 * in FIFO order and invoke their qresume callbacks until the queue
 * fills up again.
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/* Stop as soon as the request queue is full again. */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}
734
/*
 * Drain response queue @qid: dispatch every message between consumer
 * and producer index through bfa_isrs[] (indexed by message class),
 * acknowledge the new CI to hardware, then resume any requests that
 * were waiting for request-queue space.
 * Returns BFA_TRUE if at least one message was processed.
 */
bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;
	bfa_boolean_t ret;

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	/* Anything to consume? */
	ret = (ci != pi);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/* Acknowledge the consumed entries to the adapter. */
	bfa_isr_rspq_ack(bfa, qid, ci);

	/* Completions may have freed request-queue space -- resume waiters. */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);

	return ret;
}
770
/*
 * Handle a request-queue interrupt for @qid: acknowledge it and resume
 * any requests waiting for request-queue space.
 */
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	bfa_isr_reqq_ack(bfa, qid);

	/* The adapter consumed entries -- resume queued waiters, if any. */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
785
/*
 * Single-vector MSI-X handler: service RME (response) interrupts
 * first, then CPE (request) interrupts, then any remaining error
 * bits via bfa_msix_lpu_err().  @vec is unused here.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/* RME (response message event) interrupts. */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/* CPE (command posted event) interrupts. */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return;

	/* Whatever is left is mailbox/error status. */
	bfa_msix_lpu_err(bfa, intr);
}
823
/*
 * Legacy INTx interrupt handler.  Clears queue interrupt bits, always
 * polls all response queues (RME status may race with the clear), then
 * services request queues and finally error bits.
 * Returns BFA_TRUE if this device raised the interrupt (or completed
 * any response-queue work), BFA_FALSE for a spurious/shared interrupt.
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;
	bfa_boolean_t rspq_comp = BFA_FALSE;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/* Unconditionally poll all RME queues. */
	if (bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			if (bfa_isr_rspq(bfa, queue))
				rspq_comp = BFA_TRUE;
	}

	if (!intr)
		return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;

	/* CPE (request queue) interrupts. */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/* Error bits -- only meaningful once interrupts are enabled. */
	if (bfa->intr_enabled)
		bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
866
/*
 * Enable adapter interrupts: install the control MSI-X vector, build
 * the unmask set for this ASIC family and PCI function, clear stale
 * status, program the mask register and select INTx/MSI-X mode.
 */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int port_id = bfa_ioc_portid(&bfa->ioc);

	bfa_trc(bfa, bfa_ioc_pcifn(&bfa->ioc));
	bfa_trc(bfa, port_id);

	bfa_msix_ctrl_install(bfa);

	/* CT2 ASICs use a different error/function bit layout. */
	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= port_id == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= port_id == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);

	/* From here on bfa_intx() may service error bits. */
	bfa->intr_enabled = BFA_TRUE;
}
897
/*
 * Disable adapter interrupts: flag them off, fall back to INTx mode,
 * mask every interrupt source and remove the MSI-X vectors.
 * Order matters -- intr_enabled is cleared first so bfa_intx() stops
 * servicing error bits.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa->intr_enabled = BFA_FALSE;
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}
906
/* MSI-X handler for a request queue; map vector number to queue id. */
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}
912
/*
 * Catch-all handler for message classes with no registered ISR: trace
 * the offending message header, warn, and freeze the trace buffer so
 * the evidence is preserved.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}
922
/* MSI-X handler for a response queue; map vector number to queue id. */
void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}
928
/*
 * Handle LPU mailbox and error interrupts.  Decodes the per-ASIC
 * halt/PSS/mailbox bits, dispatches mailbox messages, and on error
 * unblocks a halted firmware, clears latched PSS parity state, acks
 * the status bits and notifies the IOC error path.
 */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		/* LL halt exists only on CT-family ASICs. */
		halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
			(intr & __HFN_INT_LL_HALT) : 0;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * Firmware is halted; clear the halt bit so it
			 * can make progress again.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * The PSS error status register is
			 * write-to-clear: write back what we read.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
982
983
984
985
986
987
988
989
990
991
992
993
/*
 * Build the IOCFC configuration (queue base addresses, shadow index
 * addresses, element counts, resource counts -- all in wire/big-endian
 * order where multi-byte) and send it to firmware over the IOC
 * mailbox.  Called on entry to the init_cfg_wait and cfg_wait states.
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/* Tell firmware whether we run on a single MSI-X vector. */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa,
							cfg->fwcfg.num_ioim_reqs));
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

	/* Per-CQ base addresses, shadow index addresses and depths. */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing on the first (init-time)
	 * configuration only.
	 */
	if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/* Hand the DMA address of the config block to firmware. */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
1058
/*
 * One-time IOCFC initialization: save the driver handle and config,
 * select the hardware-interface function table for the ASIC family
 * (CT/CT2 vs. CB), run the register init hook and reset the MSI-X
 * vector count.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->cfg = *cfg;

	/* CT-family (Catapult) hardware interface. */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		/* CB-family (Crossbow): per-PCI-function vector bases. */
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	/* CT2 overrides a few of the CT hooks. */
	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
1111
/*
 * Carve up pre-allocated DMA and KVA memory: IOC attributes, request
 * and response queues (zeroed), per-queue shadow CI/PI cachelines, the
 * config block sent to firmware, the firmware's config response block,
 * and the firmware trace debug buffer.
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8	*dm_kva = NULL;
	u64	dm_pa = 0;
	int	i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* IOC attribute/fw-config area. */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Per-queue sizes, rounded up to the DMA alignment. */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Shadow CI/PI indices -- one cacheline each to avoid sharing. */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Config block handed to firmware in bfa_iocfc_send_cfg(). */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Firmware's config response block. */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Firmware trace buffer (KVA only). */
	bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
	bfa_mem_kva_curp(iocfc) += BFA_DBG_FWTRC_LEN;
}
1181
1182
1183
1184
1185static void
1186bfa_iocfc_start_submod(struct bfa_s *bfa)
1187{
1188 int i;
1189
1190 bfa->queue_process = BFA_TRUE;
1191 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
1192 bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));
1193
1194 for (i = 0; hal_mods[i]; i++)
1195 hal_mods[i]->start(bfa);
1196
1197 bfa->iocfc.submod_enabled = BFA_TRUE;
1198}
1199
1200
1201
1202
1203static void
1204bfa_iocfc_disable_submod(struct bfa_s *bfa)
1205{
1206 int i;
1207
1208 if (bfa->iocfc.submod_enabled == BFA_FALSE)
1209 return;
1210
1211 for (i = 0; hal_mods[i]; i++)
1212 hal_mods[i]->iocdisable(bfa);
1213
1214 bfa->iocfc.submod_enabled = BFA_FALSE;
1215}
1216
1217static void
1218bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
1219{
1220 struct bfa_s *bfa = bfa_arg;
1221
1222 if (complete)
1223 bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
1224}
1225
1226static void
1227bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
1228{
1229 struct bfa_s *bfa = bfa_arg;
1230 struct bfad_s *bfad = bfa->bfad;
1231
1232 if (compl)
1233 complete(&bfad->comp);
1234}
1235
1236static void
1237bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
1238{
1239 struct bfa_s *bfa = bfa_arg;
1240 struct bfad_s *bfad = bfa->bfad;
1241
1242 if (compl)
1243 complete(&bfad->enable_comp);
1244}
1245
1246static void
1247bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
1248{
1249 struct bfa_s *bfa = bfa_arg;
1250 struct bfad_s *bfad = bfa->bfad;
1251
1252 if (compl)
1253 complete(&bfad->disable_comp);
1254}
1255
1256
1257
1258
/*
 * Install the firmware-supplied queue register offsets: for each CQ,
 * translate the BAR0-relative offsets in @qreg into mapped addresses
 * for the CPE/RME CI, PI and control registers, and record the
 * hardware queue id mapping.
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int     i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}
1276
/*
 * Reconfigure driver resource pools to match the counts actually
 * granted by firmware in the configuration response.
 */
static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;

	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	/*
	 * NOTE(review): cfginfo appears to hold the requested IOIM count
	 * in wire (big-endian) order; cpu_to_be16 is used here as a plain
	 * 16-bit byte-swap (same operation as be16_to_cpu) -- confirm
	 * against where cfginfo->num_ioim_reqs is written.
	 */
	bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs),
			  fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}
1290
1291
1292
1293
1294static void
1295bfa_iocfc_cfgrsp(struct bfa_s *bfa)
1296{
1297 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1298 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1299 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
1300
1301 fwcfg->num_cqs = fwcfg->num_cqs;
1302 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
1303 fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
1304 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
1305 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
1306 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
1307 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
1308
1309
1310
1311
1312 bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
1313
1314
1315
1316
1317 bfa_iocfc_res_recfg(bfa, fwcfg);
1318
1319
1320
1321
1322 bfa_msix_queue_install(bfa);
1323
1324 if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
1325 bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
1326 bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
1327 bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
1328 }
1329}
1330
1331void
1332bfa_iocfc_reset_queues(struct bfa_s *bfa)
1333{
1334 int q;
1335
1336 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
1337 bfa_reqq_ci(bfa, q) = 0;
1338 bfa_reqq_pi(bfa, q) = 0;
1339 bfa_rspq_ci(bfa, q) = 0;
1340 bfa_rspq_pi(bfa, q) = 0;
1341 }
1342}
1343
1344
1345
1346
/*
 * Handle a fabric-assigned address (FAA) message from firmware: store
 * the assigned port/node WWNs in both the cached config response and
 * the live IOC attributes, then tell the IOCFC state machine that
 * configuration is complete.
 */
static void
bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
	cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;

	bfa->ioc.attr->pwwn = msg->pwwn;
	bfa->ioc.attr->nwwn = msg->nwwn;
	bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
}
1360
1361
1362
1363
1364
1365
1366static bfa_status_t
1367bfa_faa_validate_request(struct bfa_s *bfa)
1368{
1369 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
1370 u32 card_type = bfa->ioc.attr->card_type;
1371
1372 if (bfa_ioc_is_operational(&bfa->ioc)) {
1373 if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
1374 return BFA_STATUS_FEATURE_NOT_SUPPORTED;
1375 } else {
1376 return BFA_STATUS_IOC_NON_OP;
1377 }
1378
1379 return BFA_STATUS_OK;
1380}
1381
/*
 * Issue an FAA attribute query to firmware over the IOC mailbox.
 * The reply is delivered asynchronously via bfa_faa_query_reply(),
 * which invokes @cbfn with @cbarg and clears the busy flag.
 *
 * Returns BFA_STATUS_OK when the request was sent, BFA_STATUS_DEVBUSY
 * when a previous FAA request is still outstanding, or the validation
 * failure status.
 */
bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
		bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s faa_attr_req;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	bfa_status_t status;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	/* Only one FAA request may be in flight at a time. */
	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	/* Stash reply destination and completion callback. */
	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
		sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}
1411
1412
1413
1414
/*
 * Complete an outstanding FAA query: copy the firmware reply into the
 * caller-supplied attribute buffer (if any), invoke the completion
 * callback registered by bfa_faa_query(), and clear the busy flag.
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
		bfi_faa_query_rsp_t *rsp)
{
	void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

	if (iocfc->faa_args.faa_attr) {
		iocfc->faa_args.faa_attr->faa = rsp->faa;
		iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
		iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
	}

	/* A reply without a registered callback indicates a logic bug. */
	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
	iocfc->faa_args.busy = BFA_FALSE;
}
1432
1433
1434
1435
1436static void
1437bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
1438{
1439 struct bfa_s *bfa = bfa_arg;
1440
1441 if (status == BFA_STATUS_OK)
1442 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
1443 else
1444 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
1445}
1446
1447
1448
1449
1450static void
1451bfa_iocfc_disable_cbfn(void *bfa_arg)
1452{
1453 struct bfa_s *bfa = bfa_arg;
1454
1455 bfa->queue_process = BFA_FALSE;
1456 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
1457}
1458
1459
1460
1461
1462static void
1463bfa_iocfc_hbfail_cbfn(void *bfa_arg)
1464{
1465 struct bfa_s *bfa = bfa_arg;
1466
1467 bfa->queue_process = BFA_FALSE;
1468 bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
1469}
1470
1471
1472
1473
/*
 * IOC reset callback: zero all queue indices before re-enabling
 * interrupt servicing.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
1482
1483
1484
1485
1486void
1487bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
1488 struct bfa_s *bfa)
1489{
1490 int q, per_reqq_sz, per_rspq_sz;
1491 struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
1492 struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
1493 struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
1494 u32 dm_len = 0;
1495
1496
1497 bfa_mem_dma_setup(meminfo, ioc_dma,
1498 BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));
1499
1500
1501 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
1502 BFA_DMA_ALIGN_SZ);
1503 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
1504 BFA_DMA_ALIGN_SZ);
1505
1506 for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
1507 bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
1508 per_reqq_sz);
1509 bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
1510 per_rspq_sz);
1511 }
1512
1513
1514 for (q = 0; q < cfg->fwcfg.num_cqs; q++)
1515 dm_len += (2 * BFA_CACHELINE_SZ);
1516
1517
1518 dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
1519 dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
1520 BFA_CACHELINE_SZ);
1521
1522
1523 bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);
1524
1525
1526 bfa_mem_kva_setup(meminfo, iocfc_kva, BFA_DBG_FWTRC_LEN);
1527}
1528
1529
1530
1531
/*
 * Attach-time initialization of the IOCFC layer: wire up IOC callbacks,
 * attach the IOC, claim memory, and put the IOCFC state machine into
 * its stopped state. Must run before any other IOCFC operation.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	/*
	 * NOTE(review): bfa_iocfc_cbfn appears to be a file-scope struct
	 * shared by all instances -- confirm single-instance assumption.
	 */
	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);

	bfa->iocfc.cb_reqd = BFA_FALSE;
	bfa->iocfc.op_status = BFA_STATUS_OK;
	bfa->iocfc.submod_enabled = BFA_FALSE;

	bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
}
1564
1565
1566
1567
/*
 * Drive the IOCFC state machine with the INIT event.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
}
1573
1574
1575
1576
1577
/*
 * Drive the IOCFC state machine with the START event.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
}
1583
1584
1585
1586
1587
/*
 * Drive the IOCFC state machine with the STOP event.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
}
1593
/*
 * Mailbox ISR for the IOCFC message class: dispatch firmware-to-host
 * messages by message ID. Unknown IDs trigger a WARN.
 */
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_ADDR_MSG:
		bfa_iocfc_process_faa_addr(bfa,
				(struct bfi_faa_addr_msg_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);
	}
}
1622
/*
 * Return the current IOCFC attributes. For the interrupt delay and
 * latency values, a non-zero driver-configured value takes precedence
 * over the value reported in the firmware config response.
 */
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
			be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
			be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

	attr->config = iocfc->cfg;
}
1640
/*
 * Update the interrupt coalescing attributes. The new values are cached
 * in cfginfo (delay/latency stored big-endian); when the IOCFC is
 * operational they are also pushed to firmware via a SET_INTR request
 * on the IOC request queue.
 *
 * Returns BFA_STATUS_DEVBUSY when no request-queue element is free.
 */
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	/* Not operational: the cached values will be applied later. */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_fn_lpu(bfa));
	/* delay/latency already in wire byte order */
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
	return BFA_STATUS_OK;
}
1670
/*
 * Record the DMA base address of a sense-buffer segment in the config
 * info and set the per-IO sense buffer length.
 */
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}
1679
1680
1681
/*
 * Enable the IOC: log the action, request an enable-completion callback
 * and drive the IOCFC state machine with the ENABLE event.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.cb_reqd = BFA_TRUE;
	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
}
1690
/*
 * Disable the IOC: log the action and drive the IOCFC state machine
 * with the DISABLE event.
 */
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");

	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
}
1699
1700bfa_boolean_t
1701bfa_iocfc_is_operational(struct bfa_s *bfa)
1702{
1703 return bfa_ioc_is_operational(&bfa->ioc) &&
1704 bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
1705}
1706
1707
1708
1709
1710void
1711bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
1712{
1713 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1714 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1715 int i;
1716
1717 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
1718 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
1719 *nwwns = cfgrsp->pbc_cfg.nbluns;
1720 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
1721 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
1722
1723 return;
1724 }
1725
1726 *nwwns = cfgrsp->bootwwns.nwwns;
1727 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
1728}
1729
1730int
1731bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
1732{
1733 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1734 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1735
1736 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
1737 return cfgrsp->pbc_cfg.nvports;
1738}
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
/*
 * Collect the total DMA/KVA memory requirements of the whole HAL for
 * the given configuration: IOCFC first, then every HAL sub-module,
 * then the common (port/ablk/cee/sfp/flash/diag/phy/fru) components.
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
	int i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
	struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
	struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);
	struct bfa_mem_dma_s *fru_dma = BFA_MEM_FRU_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Start with empty DMA and KVA requirement lists. */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	/* Each HAL sub-module appends its own requirements. */
	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* Common component DMA requirements. */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
			  bfa_flash_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
	bfa_mem_dma_setup(meminfo, phy_dma,
			  bfa_phy_meminfo(cfg->drvcfg.min_cfg));
	bfa_mem_dma_setup(meminfo, fru_dma,
			  bfa_fru_meminfo(cfg->drvcfg.min_cfg));
}
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
/*
 * Top-level HAL attach: initialize the allocation cursors of every
 * DMA/KVA element registered by bfa_cfg_get_meminfo(), then attach the
 * IOCFC layer, each HAL sub-module, and the common components, in that
 * order. @meminfo must be the same structure previously filled in by
 * bfa_cfg_get_meminfo() with the memory already allocated by the caller.
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Reset the top-level allocation cursors to the region starts. */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	/* Reset each registered DMA element's cursors likewise. */
	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	/* Common components claim their memory after the sub-modules. */
	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_diag_attach(bfa);
	bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
	bfa_com_fru_attach(bfa, cfg->drvcfg.min_cfg);
}
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899void
1900bfa_detach(struct bfa_s *bfa)
1901{
1902 int i;
1903
1904 for (i = 0; hal_mods[i]; i++)
1905 hal_mods[i]->detach(bfa);
1906 bfa_ioc_detach(&bfa->ioc);
1907}
1908
/*
 * Move all pending completions from bfa's completion queue onto the
 * caller-supplied list, leaving bfa's queue empty.
 */
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}
1915
1916void
1917bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1918{
1919 struct list_head *qe;
1920 struct list_head *qen;
1921 struct bfa_cb_qe_s *hcb_qe;
1922 bfa_cb_cbfn_status_t cbfn;
1923
1924 list_for_each_safe(qe, qen, comp_q) {
1925 hcb_qe = (struct bfa_cb_qe_s *) qe;
1926 if (hcb_qe->pre_rmv) {
1927
1928 list_del(qe);
1929 cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
1930 cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
1931 } else
1932 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1933 }
1934}
1935
/*
 * Drain the completion queue, invoking each callback with BFA_FALSE
 * (i.e. "not a real completion"). pre_rmv entries must already have
 * been handled by bfa_comp_process() and are unexpected here.
 */
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		WARN_ON(hcb_qe->pre_rmv);
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}
1949
1950
1951
1952
1953
1954void
1955bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1956{
1957 static struct bfa_pciid_s __pciids[] = {
1958 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
1959 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
1960 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
1961 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
1962 };
1963
1964 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
1965 *pciids = __pciids;
1966}
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
/*
 * Fill @cfg with the default (full-function) firmware and driver
 * resource configuration.
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	/* firmware target-mode IO requests are not used by default */
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;

}
2004
/*
 * Fill @cfg with the minimal resource configuration: defaults first,
 * then each pool reduced to its minimum, with min_cfg marked so other
 * components can size themselves accordingly.
 */
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}
2021