1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "bfad_drv.h"
19#include "bfa_ioc.h"
20#include "bfi_ctreg.h"
21#include "bfa_defs.h"
22#include "bfa_defs_svc.h"
23
/* Register this file with the BFA trace facility (module CNA, file IOC). */
BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions. Timeout values are passed to bfa_timer_begin()
 * and are in milliseconds.
 */
#define BFA_IOC_TOV 3000		/* IOC state-machine op timeout */
#define BFA_IOC_HWSEM_TOV 500		/* h/w semaphore retry interval */
#define BFA_IOC_HB_TOV 500		/* heartbeat check interval */
#define BFA_IOC_HWINIT_MAX 5		/* max h/w init retries (initfail_sync) */
#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV

/* Arm/cancel the IOC state-machine timer; expiry calls bfa_ioc_timeout(). */
#define bfa_ioc_timer_start(__ioc) \
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)

/* Arm/cancel the heartbeat timer; expiry calls bfa_ioc_hb_check(). */
#define bfa_hb_timer_start(__ioc) \
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer, \
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->hb_timer)

/* Offset of the firmware trace buffer for PCI function (_fn). */
#define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Thin dispatch wrappers over the ASIC-specific hardware interface
 * (ioc->ioc_hwif), which is populated per chip type elsewhere.
 */
#define bfa_ioc_firmware_lock(__ioc) \
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc) \
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_join(__ioc) \
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc) \
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc) \
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

/*
 * A mailbox command is pending when the driver queue is non-empty or the
 * h/w mailbox command register still reads non-zero.
 */
#define bfa_ioc_mbox_cmd_pending(__ioc) \
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

/* Module-wide policy: attempt automatic IOC recovery after a failure. */
bfa_boolean_t bfa_auto_recover = BFA_TRUE;
73
74
75
76
/*
 * Forward declarations for file-local helper functions.
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
93
94
95
96
97
/*
 * Events driving the top-level IOC state machine (bfa_ioc_sm_*).
 */
enum ioc_event {
	IOC_E_RESET = 1,		/* IOC reset request */
	IOC_E_ENABLE = 2,		/* IOC enable request */
	IOC_E_DISABLE = 3,		/* IOC disable request */
	IOC_E_DETACH = 4,		/* driver detach cleanup */
	IOC_E_ENABLED = 5,		/* f/w enabled (from iocpf) */
	IOC_E_FWRSP_GETATTR = 6,	/* IOC get-attribute response */
	IOC_E_DISABLED = 7,		/* f/w disabled (from iocpf) */
	IOC_E_INITFAILED = 8,		/* init failure notice from iocpf */
	IOC_E_PFFAILED = 9,		/* failure notice from iocpf sm */
	IOC_E_HBFAIL = 10,		/* heartbeat failure */
	IOC_E_HWERROR = 11,		/* hardware error */
	IOC_E_TIMEOUT = 12,		/* state-machine timer expired */
};
112
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

/* Maps each IOC state handler to the externally visible BFA_IOC_* state. */
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
134
135
136
137
138
/*
 * IOCPF timer helpers. Note: the IOCPF timer shares the ioc_timer slot
 * with the IOC state machine, so only one of the two may be armed at a
 * time; expiry calls bfa_iocpf_timeout().
 */
#define bfa_iocpf_timer_start(__ioc) \
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->ioc_timer)

/* Same timer, shorter (recovery) timeout. */
#define bfa_iocpf_recovery_timer_start(__ioc) \
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer, \
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

/* Semaphore retry timer; expiry calls bfa_iocpf_sem_timeout(). */
#define bfa_sem_timer_start(__ioc) \
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer, \
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc) bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf timer callbacks.
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
158
159
160
161
/*
 * Events driving the IOCPF (per-PCI-function) state machine.
 */
enum iocpf_event {
	IOCPF_E_ENABLE = 1,		/* IOCPF enable request */
	IOCPF_E_DISABLE = 2,		/* IOCPF disable request */
	IOCPF_E_STOP = 3,		/* stop on driver detach */
	IOCPF_E_FWREADY = 4,		/* firmware ready notification */
	IOCPF_E_FWRSP_ENABLE = 5,	/* enable f/w response */
	IOCPF_E_FWRSP_DISABLE = 6,	/* disable f/w response */
	IOCPF_E_FAIL = 7,		/* failure notification */
	IOCPF_E_INITFAIL = 8,		/* init failure notification */
	IOCPF_E_GETATTRFAIL = 9,	/* init failure during getattr */
	IOCPF_E_SEMLOCKED = 10,		/* h/w semaphore is locked */
	IOCPF_E_TIMEOUT = 11,		/* IOCPF timer expired */
};
175
176
177
178
/*
 * Externally reported IOCPF states (see iocpf_sm_table below).
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET = 1,		/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT = 2,		/* waiting for h/w semaphore */
	BFA_IOCPF_HWINIT = 3,		/* h/w is being initialized */
	BFA_IOCPF_READY = 4,		/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL = 5,		/* init failed */
	BFA_IOCPF_FAIL = 6,		/* failed after being initialized */
	BFA_IOCPF_DISABLING = 7,	/* being disabled */
	BFA_IOCPF_DISABLED = 8,		/* disabled */
	BFA_IOCPF_FWMISMATCH = 9,	/* f/w version mismatch */
};
190
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

/* Maps each IOCPF state handler to the externally visible BFA_IOCPF_*
 * state. Several internal states share one external state. */
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
224
225
226
227
228
229
230
231
232
/*
 * Beginning state; no entry action required.
 */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}
237
238
239
240
241static void
242bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
243{
244 bfa_trc(ioc, event);
245
246 switch (event) {
247 case IOC_E_RESET:
248 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
249 break;
250
251 default:
252 bfa_sm_fault(ioc, event);
253 }
254}
255
256
257
/*
 * Reset entry action: put the IOCPF sub-state-machine into reset too.
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
263
264
265
266
/*
 * IOC is in reset state: waiting for an enable, disable, or detach.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		/* Already effectively disabled; just run completions. */
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
289
290
/*
 * Enabling entry action: kick the IOCPF state machine to bring up
 * firmware on this PCI function.
 */
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}
296
297
298
299
300
/*
 * Host IOC function is being enabled, awaiting an enabled notification
 * from the IOCPF state machine.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		/* On PFFAILED the iocpf already knows; don't re-notify it. */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		/* Duplicate enable request; ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
336
337
/*
 * Getattr entry action: arm the IOC timer and request IOC attributes
 * from firmware.
 */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}
344
345
346
347
348static void
349bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
350{
351 bfa_trc(ioc, event);
352
353 switch (event) {
354 case IOC_E_FWRSP_GETATTR:
355 bfa_ioc_timer_stop(ioc);
356 bfa_ioc_check_attr_wwns(ioc);
357 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
358 break;
359
360 break;
361 case IOC_E_PFFAILED:
362 case IOC_E_HWERROR:
363 bfa_ioc_timer_stop(ioc);
364
365 case IOC_E_TIMEOUT:
366 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
367 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
368 if (event != IOC_E_PFFAILED)
369 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
370 break;
371
372 case IOC_E_DISABLE:
373 bfa_ioc_timer_stop(ioc);
374 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
375 break;
376
377 case IOC_E_ENABLE:
378 break;
379
380 default:
381 bfa_sm_fault(ioc, event);
382 }
383}
384
385
/*
 * Operational entry action: complete the enable callback, start
 * heartbeat monitoring of firmware, and log the transition.
 */
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
}
395
/*
 * IOC is operational. Failures here either retry (auto-recover) or park
 * in the failed state.
 */
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		/* Already enabled; ignore. */
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		bfa_ioc_fail_notify(ioc);

		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		/* On PFFAILED the iocpf already knows; don't re-notify it. */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
430
431
/*
 * Disabling entry action: forward the disable to the IOCPF state
 * machine and log the transition.
 */
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
}
439
440
441
442
/*
 * IOC is being disabled; waiting for the IOCPF disable handshake.
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change here — the iocpf is told of the failure
		 * and this state presumably reaches disabled once the
		 * iocpf failure handling completes (TODO confirm).
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
466
467
468
469
/*
 * Disabled entry action: run disable completions for waiting modules.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
475
/*
 * IOC is disabled: may be re-enabled, disabled again (callback only),
 * or torn down on detach.
 */
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		/* Already disabled; just complete the request. */
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
499
500
/*
 * Fail-retry entry action: trace only; the iocpf drives the retry.
 */
static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}
506
507
508
509
/*
 * Hardware initialization retry in progress (driven by the iocpf).
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		/* Retry succeeded; continue with attribute fetch. */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		/* On PFFAILED the iocpf already knows; don't re-notify it. */
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_INITFAILED:
		/* Retries exhausted (see iocpf initfail_sync handling). */
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
550
551
/*
 * Fail entry action: trace only.
 */
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}
557
558
559
560
/*
 * IOC failure state: enable requests are completed with failure status;
 * only disable/detach move the machine on.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification — already failed, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}
590
591
592
593
594
595
596
597
/*
 * IOCPF reset entry action: clear the retry counter and latch the
 * module-wide auto-recover policy for this bring-up cycle.
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->retry_count = 0;
	iocpf->auto_recover = bfa_auto_recover;
}
604
605
606
607
608static void
609bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
610{
611 struct bfa_ioc_s *ioc = iocpf->ioc;
612
613 bfa_trc(ioc, event);
614
615 switch (event) {
616 case IOCPF_E_ENABLE:
617 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
618 break;
619
620 case IOCPF_E_STOP:
621 break;
622
623 default:
624 bfa_sm_fault(ioc, event);
625 }
626}
627
628
629
630
/*
 * Fwcheck entry action: try to grab the h/w semaphore before touching
 * the shared firmware state.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
636
637
638
639
/*
 * Awaiting the h/w semaphore to continue with the firmware version
 * check. Writing 1 to ioc_sem_reg releases the semaphore.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_complete(ioc)) {
				/* No other PF is mid-sync; proceed. */
				iocpf->retry_count = 0;
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				/* Sync in progress: release sem and retry. */
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			/* Firmware version mismatch. */
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
680
681
682
683
/*
 * Mismatch entry action: report the f/w mismatch only on first entry,
 * then count the pass and re-arm the timer for another fwcheck attempt.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Notify the mismatch only the first time around.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->retry_count++;
	bfa_iocpf_timer_start(iocpf->ioc);
}
696
697
698
699
/*
 * Firmware mismatch state: on timeout, go back and re-check the
 * firmware version (another driver instance may have reloaded it).
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
727
728
729
730
/*
 * Semwait entry action: request the h/w semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
736
737
738
739
/*
 * Awaiting the h/w semaphore to continue with h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			/* Sync not done: release sem and poll again. */
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
767
/*
 * Hwinit entry action: arm the iocpf timer and start (non-forced)
 * hardware initialization.
 */
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}
774
775
776
777
778
/*
 * Hardware is being initialized. Interrupts are enabled.
 * The h/w semaphore is held throughout this state.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);	/* release sem */
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);	/* release sem */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
816
/*
 * Enabling entry action: arm the iocpf timer and send the enable
 * request to firmware over the mailbox.
 */
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_enable(iocpf->ioc);
}
823
824
825
826
827
/*
 * Host-to-firmware enable handshake in progress. The h/w semaphore is
 * held until the enable response (or failure) arrives.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);	/* release sem */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);	/* release sem */
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);	/* release sem */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		/* Firmware signalled ready again; re-send the enable. */
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
869
/*
 * Ready entry action: tell the IOC state machine that firmware is
 * enabled on this function.
 */
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}
875
/*
 * IOCPF is ready/operational.
 */
static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	case IOCPF_E_FWREADY:
		/*
		 * Unexpected firmware-ready while already up — treated as
		 * a PF failure either way; the target sync state depends
		 * on whether the IOC was operational.
		 */
		if (bfa_ioc_is_operational(ioc)) {
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		} else {
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		}
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
910
/*
 * Disabling entry action: arm the iocpf timer and send the disable
 * request to firmware over the mailbox.
 */
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}
917
918
919
920
/*
 * IOC firmware is being disabled; waiting for the f/w response.
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		/* Force-fail the f/w state and proceed with sync anyway. */
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		/* Stale enable response; ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
953
/*
 * Disabling-sync entry action: request the h/w semaphore so the leave
 * can be done under the lock.
 */
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
959
960
961
962
/*
 * IOC h/w is being disabled; waiting for the semaphore to leave the
 * inter-function sync.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);	/* release sem */
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		/* Already disabling; nothing more to do. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
984
985
986
987
/*
 * Disabled entry action: report the disabled state up to the IOC
 * state machine.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}
993
/*
 * IOCPF is disabled: can be re-enabled (back to semwait) or stopped
 * (unlock firmware and return to reset).
 */
static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		iocpf->retry_count = 0;
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
1016
/*
 * Initfail-sync entry action: request the h/w semaphore before
 * touching shared failure/sync state.
 */
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
1022
1023
1024
1025
/*
 * Hardware initialization failed: under the semaphore, ack the sync,
 * then either retry hwinit (up to BFA_IOC_HWINIT_MAX attempts) or give
 * up and move to the terminal initfail state.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_ack(ioc);
		iocpf->retry_count++;
		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
			/* Retries exhausted: leave sync, release sem. */
			bfa_ioc_sync_leave(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				/* Other functions still syncing: retry. */
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		/* Already in failure handling; ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
1070
/*
 * Initfail entry action: report the terminal init failure to the IOC
 * state machine.
 */
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
}
1076
1077
1078
1079
/*
 * Hardware initialization failed (terminal state).
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
1101
static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Hold the LPU processors in reset so firmware stops running
	 * (see bfa_ioc_lpu_stop).
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush/fail any queued-up mailbox requests — presumably what
	 * bfa_ioc_mbox_hbfail does; confirm against its definition.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	/* Acquire the semaphore to run failure sync under the lock. */
	bfa_ioc_hw_sem_get(iocpf->ioc);
}
1117
/*
 * IOC failed while operational: under the semaphore, ack the sync and
 * notify peers; then either park in the fail state or, if auto-recover
 * is on, retry hardware initialization.
 */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		iocpf->retry_count = 0;
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			/* No recovery: leave sync, release sem, park. */
			bfa_ioc_sync_leave(ioc);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				/* Other functions still syncing: retry. */
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		/* Already in failure handling; ignore. */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
1156
/*
 * Fail entry action: nothing to do.
 */
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
}
1161
1162
1163
1164
1165static void
1166bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1167{
1168 struct bfa_ioc_s *ioc = iocpf->ioc;
1169
1170 bfa_trc(ioc, event);
1171
1172 switch (event) {
1173 case IOCPF_E_DISABLE:
1174 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1175 break;
1176
1177 default:
1178 bfa_sm_fault(ioc, event);
1179 }
1180}
1181
1182
1183
1184
1185
/*
 * Complete a disable request: run the module's disable callback, then
 * invoke every callback registered on the heartbeat-notify queue.
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/*
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}
}
1202
1203bfa_boolean_t
1204bfa_ioc_sem_get(void __iomem *sem_reg)
1205{
1206 u32 r32;
1207 int cnt = 0;
1208#define BFA_SEM_SPINCNT 3000
1209
1210 r32 = readl(sem_reg);
1211
1212 while (r32 && (cnt < BFA_SEM_SPINCNT)) {
1213 cnt++;
1214 udelay(2);
1215 r32 = readl(sem_reg);
1216 }
1217
1218 if (r32 == 0)
1219 return BFA_TRUE;
1220
1221 WARN_ON(cnt >= BFA_SEM_SPINCNT);
1222 return BFA_FALSE;
1223}
1224
/*
 * Non-blocking attempt to take the IOC h/w semaphore: a read of 0 from
 * ioc_sem_reg means the semaphore was acquired — deliver SEMLOCKED to
 * the iocpf. Otherwise arm the sem timer to retry later.
 */
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}
1242
1243
1244
1245
/*
 * Initialize LPU local memory (aka L-mem): take it out of reset,
 * enable the init engine, poll for completion, then clear the
 * init-enable/done bits. Register write ordering is significant.
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME 10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock — NOTE(review): divider value 3
	 * taken as-is; confirm against ASIC documentation.
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * Wait for memory initialization to be complete.
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not done, it must be a h/w failure.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
1282
/*
 * Release LPU 0 from reset so it starts executing firmware.
 */
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
1296
/*
 * Put both LPUs (0 and 1) into reset, halting firmware execution.
 */
static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32 pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
1310
1311
1312
1313
/*
 * Read the firmware image header out of shared memory (smem page 0,
 * offset 0) into *fwhdr, one 32-bit word at a time.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	/* Select the smem page for this function before reading. */
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
1333
1334
1335
1336
1337bfa_boolean_t
1338bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1339{
1340 struct bfi_ioc_image_hdr_s *drv_fwhdr;
1341 int i;
1342
1343 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1344 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
1345
1346 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1347 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
1348 bfa_trc(ioc, i);
1349 bfa_trc(ioc, fwhdr->md5sum[i]);
1350 bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1351 return BFA_FALSE;
1352 }
1353 }
1354
1355 bfa_trc(ioc, fwhdr->md5sum[0]);
1356 return BFA_TRUE;
1357}
1358
1359
1360
1361
1362
/*
 * Returns TRUE if the firmware currently in adapter memory is valid for
 * this driver: signature, boot environment, and MD5 sum must all match
 * the driver's embedded image.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	/* The header's param field is stored byte-swapped in smem. */
	if (swab32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
1386
1387
1388
1389
1390static void
1391bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1392{
1393 u32 r32;
1394
1395 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1396 if (r32)
1397 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1398}
1399
/*
 * Hardware/firmware initialization decision. Depending on the current
 * firmware state and whether the resident firmware matches the driver's
 * image, either boot fresh firmware, wait for an in-progress init, or
 * reuse the already-running firmware.
 */
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/* force == BFA_TRUE pretends the f/w state is UNINIT, forcing a
	 * full boot below. */
	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/*
	 * If h/w initialization is in progress — presumably started by
	 * another function/driver instance — just wait for the
	 * initialization-completion interrupt (TODO confirm).
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/*
	 * If firmware is already disabled or operational with a matching
	 * version, reuse it: flush any stale mailbox message, reset the
	 * local view, and tell the iocpf the firmware is ready.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}
1463
1464static void
1465bfa_ioc_timeout(void *ioc_arg)
1466{
1467 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
1468
1469 bfa_trc(ioc, 0);
1470 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1471}
1472
/*
 * Copy a message into the host-to-firmware mailbox registers and ring
 * the doorbell. The register write order matters: payload words first,
 * zero-padding to BFI_IOC_MSGLEN_MAX, then the command trigger.
 *
 * @param ioc     IOC instance.
 * @param ioc_msg message buffer (must be a multiple of 4 bytes; len is
 *                truncated to whole u32 words by the loop below).
 * @param len     message length in bytes, at most BFI_IOC_MSGLEN_MAX.
 */
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	WARN_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/* zero-fill the remainder so stale words are not interpreted */
	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event; the read-back
	 * flushes the posted write.
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
1500
1501static void
1502bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1503{
1504 struct bfi_ioc_ctrl_req_s enable_req;
1505 struct timeval tv;
1506
1507 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1508 bfa_ioc_portid(ioc));
1509 enable_req.ioc_class = ioc->ioc_mc;
1510 do_gettimeofday(&tv);
1511 enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
1512 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1513}
1514
1515static void
1516bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1517{
1518 struct bfi_ioc_ctrl_req_s disable_req;
1519
1520 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1521 bfa_ioc_portid(ioc));
1522 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1523}
1524
1525static void
1526bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1527{
1528 struct bfi_ioc_getattr_req_s attr_req;
1529
1530 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1531 bfa_ioc_portid(ioc));
1532 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1533 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1534}
1535
1536static void
1537bfa_ioc_hb_check(void *cbarg)
1538{
1539 struct bfa_ioc_s *ioc = cbarg;
1540 u32 hb_count;
1541
1542 hb_count = readl(ioc->ioc_regs.heartbeat);
1543 if (ioc->hb_count == hb_count) {
1544 bfa_ioc_recover(ioc);
1545 return;
1546 } else {
1547 ioc->hb_count = hb_count;
1548 }
1549
1550 bfa_ioc_mbox_poll(ioc);
1551 bfa_hb_timer_start(ioc);
1552}
1553
/*
 * Start heartbeat monitoring: seed the last-seen heartbeat count and
 * arm the periodic check timer.
 */
static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}
1560
1561
1562
1563
1564static void
1565bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1566 u32 boot_env)
1567{
1568 u32 *fwimg;
1569 u32 pgnum, pgoff;
1570 u32 loff = 0;
1571 u32 chunkno = 0;
1572 u32 i;
1573
1574
1575
1576
1577 bfa_ioc_lmem_init(ioc);
1578
1579 bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
1580 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1581
1582 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1583 pgoff = PSS_SMEM_PGOFF(loff);
1584
1585 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1586
1587 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1588
1589 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1590 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1591 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1592 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1593 }
1594
1595
1596
1597
1598 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1599 fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1600
1601 loff += sizeof(u32);
1602
1603
1604
1605
1606 loff = PSS_SMEM_PGOFF(loff);
1607 if (loff == 0) {
1608 pgnum++;
1609 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1610 }
1611 }
1612
1613 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1614 ioc->ioc_regs.host_page_num_fn);
1615
1616
1617
1618
1619 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
1620 swab32(boot_type));
1621 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
1622 swab32(boot_env));
1623}
1624
1625
1626
1627
1628
1629static void
1630bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1631{
1632 struct bfi_ioc_attr_s *attr = ioc->attr;
1633
1634 attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
1635 attr->card_type = be32_to_cpu(attr->card_type);
1636 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
1637
1638 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1639}
1640
1641
1642
1643
1644static void
1645bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1646{
1647 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1648 int mc;
1649
1650 INIT_LIST_HEAD(&mod->cmd_q);
1651 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1652 mod->mbhdlr[mc].cbfn = NULL;
1653 mod->mbhdlr[mc].cbarg = ioc->bfa;
1654 }
1655}
1656
1657
1658
1659
/*
 * Try to push one queued mailbox command to the firmware. Called from
 * the heartbeat path; the order of the checks matters: bail out early
 * if nothing is queued, then if the mailbox is still busy.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;
	u32 stat;

	/*
	 * nothing pending to send
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * previous command is still owned by the firmware (busy)
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * dequeue the oldest command and hand it to the mailbox
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
1686
1687
1688
1689
1690static void
1691bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
1692{
1693 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1694 struct bfa_mbox_cmd_s *cmd;
1695
1696 while (!list_empty(&mod->cmd_q))
1697 bfa_q_deq(&mod->cmd_q, &cmd);
1698}
1699
1700
1701
1702
1703
1704
1705
1706
1707
/*
 * Read `sz` bytes of adapter shared memory starting at SMEM offset
 * `soff` into `tbuf`, converting each big-endian word to host order.
 * Serialized against PLL init via the init semaphore register.
 *
 * @return BFA_STATUS_OK on success, BFA_STATUS_FAILED if the init
 *         semaphore could not be acquired.
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff;
	__be32 r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold the init semaphore to serialize against concurrent
	 * SMEM access / PLL initialization.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	/* restore the page register to page 0 */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release the init semaphore
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
1758
1759
1760
1761
1762
1763
1764
1765
/*
 * Zero-fill `sz` bytes of adapter shared memory starting at SMEM
 * offset `soff`. Same paging and semaphore discipline as
 * bfa_ioc_smem_read().
 *
 * @return BFA_STATUS_OK on success, BFA_STATUS_FAILED if the init
 *         semaphore could not be acquired.
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold the init semaphore to serialize SMEM access.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}
	/* restore the page register to page 0 */
	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release the init semaphore
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
1813
1814static void
1815bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1816{
1817 struct list_head *qe;
1818 struct bfa_ioc_hbfail_notify_s *notify;
1819 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1820
1821
1822
1823
1824 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1825 list_for_each(qe, &ioc->hb_notify_q) {
1826 notify = (struct bfa_ioc_hbfail_notify_s *) qe;
1827 notify->cbfn(notify->cbarg);
1828 }
1829
1830 bfa_ioc_debug_save_ftrc(ioc);
1831
1832 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1833 "Heart Beat of IOC has failed\n");
1834
1835}
1836
/*
 * Report a firmware/driver version mismatch: fail the pending enable
 * callback and log a warning.
 */
static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable-completion callback with a failure status.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}
1849
/*
 * Initialize the ASIC PLL, serialized by the init semaphore so that
 * SMEM readers (bfa_ioc_smem_read/clr) do not race with PLL setup.
 *
 * @return BFA_STATUS_OK always (the ASIC-specific init result is not
 *         propagated here).
 */
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * release semaphore
	 */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
1869
1870
1871
1872
1873
/*
 * Boot the firmware: bring up the PLL, advertise the requested boot
 * mode in both IOC state registers, flush stale mailbox messages,
 * download the image, and release the processor.
 *
 * @param boot_type BFI_BOOT_TYPE_NORMAL or BFI_BOOT_TYPE_MEMTEST.
 * @param boot_env  boot loader environment (e.g. BFI_BOOT_LOADER_OS).
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize both IOC state registers before starting firmware.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/*
	 * Release processor so that the downloaded firmware starts running.
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
1905
1906
1907
1908
/*
 * Enable/disable automatic IOC recovery after heartbeat failure
 * (module-wide setting).
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}
1914
1915
1916
/*
 * @return BFA_TRUE if the IOC FSM is in the operational state.
 */
bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
1922
1923bfa_boolean_t
1924bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
1925{
1926 u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
1927
1928 return ((r32 != BFI_IOC_UNINIT) &&
1929 (r32 != BFI_IOC_INITING) &&
1930 (r32 != BFI_IOC_MEMTEST));
1931}
1932
/*
 * Copy a complete firmware-to-host mailbox message into `mbmsg`
 * (stored as big-endian words) and acknowledge the mailbox so the
 * firmware can post the next message.
 */
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	__be32 *msgp = mbmsg;
	u32 r32;
	int i;

	/*
	 * read the MBOX msg, one register word at a time
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			i * sizeof(u32));
		msgp[i] = cpu_to_be32(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status;
	 * the read-back flushes the posted write
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}
1956
/*
 * Dispatch a firmware-to-host message of class BFI_MC_IOC to the IOC
 * or IOCPF state machine, keyed on the message id. Unknown ids are
 * traced and trigger a WARN.
 */
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		/* heartbeat is consumed by the timer path; nothing to do */
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		WARN_ON(1);
	}
}
1992
1993
1994
1995
1996
1997
1998
/*
 * Attach an IOC instance to its driver: record back-pointers and
 * callbacks, initialize the mailbox module and the notification
 * queue, then put the FSM into its initial (uninit) state and reset.
 *
 * @param bfa       opaque driver instance passed back in callbacks.
 * @param cbfn      completion callbacks (enable/disable/hbfail/reset).
 * @param timer_mod timer module used by the IOC timers.
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->timer_mod = timer_mod;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
2017
2018
2019
2020
/*
 * Detach the IOC: drive the FSM through its detach transition.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
2026
2027
2028
2029
2030
2031
/*
 * PCI-level setup: record the device info, pick the ASIC-specific
 * hardware interface (CT vs CB) based on the device id, then map the
 * port and initialize the register addresses.
 *
 * @param mc message class this IOC function serves.
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
	/* CNA = Catapult ASIC not running in FC-only mode */
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
2052
2053
2054
2055
2056
2057
2058
/*
 * Claim the DMA-able memory used for the firmware attribute block.
 *
 * @param dm_kva kernel virtual address of the DMA region.
 * @param dm_pa  physical (bus) address of the same region.
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}
2069
/*
 * Request IOC enable; re-arms the one-shot firmware-trace save flag
 * so a subsequent failure is captured.
 */
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
2078
/*
 * Request IOC disable via the FSM.
 */
void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
2085
2086
2087
2088
2089
2090
/*
 * Claim the buffer used to save the firmware trace on failure. The
 * save length is non-zero only when auto-recovery is enabled.
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
2097
2098
2099
2100
2101
2102
2103
2104void
2105bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2106{
2107 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2108 int mc;
2109
2110 for (mc = 0; mc < BFI_MC_MAX; mc++)
2111 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2112}
2113
2114
2115
2116
/*
 * Register a mailbox handler for a single message class.
 *
 * @param mc    message class to handle.
 * @param cbfn  handler invoked with (cbarg, msg).
 * @param cbarg opaque argument for the handler.
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
2126
2127
2128
2129
2130
2131
2132
2133
/*
 * Queue a mailbox command to the firmware. The command is sent
 * immediately if nothing is queued and the mailbox is free;
 * otherwise it is appended to the pending queue and flushed later
 * by bfa_ioc_mbox_poll().
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	u32 stat;

	/*
	 * If a previous command is pending, queue behind it to
	 * preserve ordering.
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If the mailbox is busy, queue the command instead.
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * mailbox is free -- send right away
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
2162
2163
2164
2165
2166void
2167bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2168{
2169 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2170 struct bfi_mbmsg_s m;
2171 int mc;
2172
2173 bfa_ioc_msgget(ioc, &m);
2174
2175
2176
2177
2178 mc = m.mh.msg_class;
2179 if (mc == BFI_MC_IOC) {
2180 bfa_ioc_isr(ioc, &m);
2181 return;
2182 }
2183
2184 if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2185 return;
2186
2187 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2188}
2189
/*
 * Hardware error interrupt: forward to the IOC FSM.
 */
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
2195
/*
 * Force FC-only mode for this IOC; the port id then tracks the
 * PCI function number.
 */
void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}
2202
2203
2204
2205
/*
 * @return BFA_TRUE if the IOC is disabled or in the middle of
 *         disabling.
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
2212
2213
2214
2215
/*
 * @return BFA_TRUE if the IOC/IOCPF state machines indicate a pending
 *         reset or a firmware version check/mismatch condition.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}
2223
2224#define bfa_ioc_state_disabled(__sm) \
2225 (((__sm) == BFI_IOC_UNINIT) || \
2226 ((__sm) == BFI_IOC_INITING) || \
2227 ((__sm) == BFI_IOC_HWINIT) || \
2228 ((__sm) == BFI_IOC_DISABLED) || \
2229 ((__sm) == BFI_IOC_FAIL) || \
2230 ((__sm) == BFI_IOC_CFG_DISABLED))
2231
2232
2233
2234
2235
/*
 * @return BFA_TRUE only when this IOC's FSM is disabled AND both
 *         firmware IOC state registers show a disabled-equivalent
 *         state (single-port 8G1P adapters check only IOC0).
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32 ioc_state;
	void __iomem *rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	/* dual-function adapters must also have IOC1 disabled */
	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
		if (!bfa_ioc_state_disabled(ioc_state))
			return BFA_FALSE;
	}

	return BFA_TRUE;
}
2257
2258
2259
2260
/*
 * Reset both this function's and the partner function's firmware
 * state registers to UNINIT.
 */
void
bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
{
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
}
2267
2268#define BFA_MFG_NAME "Brocade"
/*
 * Fill `ad_attr` from the firmware attribute block (ioc->attr) and
 * derived fields. Assumes a GETATTR reply has already populated and
 * byte-swapped ioc->attr (see bfa_ioc_getattr_reply).
 */
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
	       sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* model description is the same string as the model */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = ioc->attr->pwwn;
	ad_attr->mac  = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	/* trunking needs >1 port, non-CNA, non-mezzanine */
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna &&
				!ad_attr->is_mezz;
}
2313
/*
 * Classify this IOC: FC for non-Catapult ASICs or forced FC mode,
 * FCoE for the IOCFC message class, LL otherwise.
 */
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		/*
		 * Unexpected message class: warn (the condition is always
		 * true here since the LL case was handled above) and fall
		 * back to LL.
		 */
		WARN_ON(ioc->ioc_mc != BFI_MC_LL);
		return BFA_IOC_TYPE_LL;
	}
}
2328
2329void
2330bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2331{
2332 memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2333 memcpy((void *)serial_num,
2334 (void *)ioc->attr->brcd_serialnum,
2335 BFA_ADAPTER_SERIAL_NUM_LEN);
2336}
2337
2338void
2339bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2340{
2341 memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2342 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2343}
2344
2345void
2346bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2347{
2348 WARN_ON(!chip_rev);
2349
2350 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2351
2352 chip_rev[0] = 'R';
2353 chip_rev[1] = 'e';
2354 chip_rev[2] = 'v';
2355 chip_rev[3] = '-';
2356 chip_rev[4] = ioc->attr->asic_rev;
2357 chip_rev[5] = '\0';
2358}
2359
2360void
2361bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2362{
2363 memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2364 memcpy(optrom_ver, ioc->attr->optrom_version,
2365 BFA_VERSION_LEN);
2366}
2367
2368void
2369bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2370{
2371 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2372 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2373}
2374
2375void
2376bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2377{
2378 struct bfi_ioc_attr_s *ioc_attr;
2379
2380 WARN_ON(!model);
2381 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2382
2383 ioc_attr = ioc->attr;
2384
2385
2386
2387
2388 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2389 BFA_MFG_NAME, ioc_attr->card_type);
2390}
2391
/*
 * Map the IOC FSM state to an externally-visible bfa_ioc_state. For
 * transitional IOC states (enabling / fail / initfail) the IOCPF
 * sub-state refines the answer.
 */
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
		/* consult the IOCPF sub-state for more detail */
		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			/* keep the IOC-level state */
			break;
		}
	}

	return ioc_st;
}
2431
/*
 * Fill the caller-supplied IOC attribute structure: state, port id,
 * IOC type, adapter attributes, and PCI details.
 */
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
2448
2449mac_t
2450bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2451{
2452
2453
2454
2455 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2456 return ioc->attr->fcoe_mac;
2457 else
2458 return ioc->attr->mac;
2459}
2460
/*
 * Return the manufacturing (base) MAC, offset per PCI function. Old
 * WWN/MAC models add the function number to the last byte only; newer
 * models increment across the low three bytes.
 */
mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t	m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}
2475
/*
 * @return BFA_TRUE if the IOC operates in FC mode: explicitly forced,
 *         or implied by a non-Catapult (Crossbow) device id.
 */
bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
2481
2482
2483
2484
2485bfa_status_t
2486bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2487{
2488 int tlen;
2489
2490 if (ioc->dbg_fwsave_len == 0)
2491 return BFA_STATUS_ENOFSAVE;
2492
2493 tlen = *trclen;
2494 if (tlen > ioc->dbg_fwsave_len)
2495 tlen = ioc->dbg_fwsave_len;
2496
2497 memcpy(trcdata, ioc->dbg_fwsave, tlen);
2498 *trclen = tlen;
2499 return BFA_STATUS_OK;
2500}
2501
2502
2503
2504
2505
2506bfa_status_t
2507bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2508{
2509 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2510 int tlen;
2511 bfa_status_t status;
2512
2513 bfa_trc(ioc, *trclen);
2514
2515 tlen = *trclen;
2516 if (tlen > BFA_DBG_FWTRC_LEN)
2517 tlen = BFA_DBG_FWTRC_LEN;
2518
2519 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2520 *trclen = tlen;
2521 return status;
2522}
2523
/*
 * Queue a debug-sync request to the firmware; used as a barrier so
 * that a following SMEM read sees a consistent firmware state.
 */
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_queue(ioc, &cmd);
}
2535
/*
 * Send a firmware sync and busy-wait (bounded) until the mailbox
 * drains, i.e. the firmware has consumed the sync request.
 */
static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect.  We will not wait for a response because
	 *    1. fw_sync mbox cmd doesn't have a response.
	 *    2. Even if we implement that, interrupts might not
	 *	 be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * stays pending forever -- hence the bounded iteration count.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}
2557
2558
2559
2560
/*
 * Incrementally dump the firmware SMEM core. The caller repeatedly
 * invokes this with a running *offset; each call reads up to *buflen
 * bytes and advances *offset (wrapping to 0 at the end of SMEM). On
 * the first chunk (offset 0) a firmware sync is performed so the dump
 * is coherent.
 *
 * @return BFA_STATUS_EINVAL for an out-of-range offset, otherwise the
 *         status of the underlying SMEM read.
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
		     u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First chunk: sync with the firmware so the dump is coherent.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	/* clamp the last chunk to the end of SMEM */
	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);

	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	*offset += dlen;

	/* wrap the cursor when the whole region has been read */
	if (*offset >= smem_len)
		*offset = 0;

	*buflen = dlen;

	return status;
}
2604
2605
2606
2607
/*
 * Read this port's firmware statistics block from SMEM into `stats`.
 * Guarded by a simple busy flag; concurrent calls get DEVBUSY.
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}
2628
/*
 * Zero this port's firmware statistics block in SMEM. Same busy-flag
 * guard as bfa_ioc_fw_stats_get().
 */
bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}
2649
2650
2651
2652
2653static void
2654bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2655{
2656 int tlen;
2657
2658 if (ioc->dbg_fwsave_once) {
2659 ioc->dbg_fwsave_once = BFA_FALSE;
2660 if (ioc->dbg_fwsave_len) {
2661 tlen = ioc->dbg_fwsave_len;
2662 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2663 }
2664 }
2665}
2666
2667
2668
2669
/*
 * Heartbeat failure detected: bump the stat and drive the FSM into
 * its heartbeat-fail handling.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
2676
/*
 * Validate attribute WWNs for FC/FCoE IOCs. LL IOCs have no WWNs and
 * are skipped. NOTE(review): the non-LL path currently performs no
 * checks here -- presumably validation lives elsewhere or was removed;
 * confirm against the full file.
 */
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
2683
2684
2685
2686
2687static void
2688bfa_iocpf_timeout(void *ioc_arg)
2689{
2690 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2691
2692 bfa_trc(ioc, 0);
2693 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2694}
2695
/*
 * Hardware-semaphore poll timer callback: retry acquiring the
 * semaphore.
 */
static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
2703
2704
2705
2706
/*
 * Timer module tick, called every BFA_TIMER_FREQ. Expired timers are
 * first moved onto a private list (so callbacks that re-arm or stop
 * timers cannot corrupt the iteration), then their callbacks run.
 */
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	qe = bfa_q_next(qh);

	while (qe != qh) {
		/* capture next before the element may be unlinked */
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			elem->timeout = 0;
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
		} else {
			elem->timeout -= BFA_TIMER_FREQ;
		}

		qe = qe_next;
	}

	/*
	 * Pop expired timers and invoke their callbacks.
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);
	}
}
2742
2743
2744
2745
/*
 * Arm a one-shot timer: record callback, argument and timeout, then
 * append it to the module's active-timer queue. The timer must not
 * already be queued.
 *
 * @param timeout expiry in the same units consumed by bfa_timer_beat
 *                (decremented by BFA_TIMER_FREQ per tick).
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		void (*timercb) (void *), void *arg, unsigned int timeout)
{

	WARN_ON(timercb == NULL);
	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;
	timer->arg = arg;

	list_add_tail(&timer->qe, &mod->timer_q);
}
2760
2761
2762
2763
/*
 * Cancel an armed timer by unlinking it from the active queue. The
 * timer must currently be queued.
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	WARN_ON(list_empty(&timer->qe));

	list_del(&timer->qe);
}
2771