/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
#include "bfi.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV             3000    /* msecs */
#define BFA_IOC_HWSEM_TOV       500     /* msecs */
#define BFA_IOC_HB_TOV          500     /* msecs */
#define BFA_IOC_TOV_RECOVER     BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV        BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)                                      \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)                                       \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,         \
                        bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)        bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)  (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

#define bfa_ioc_state_disabled(__sm)            \
        (((__sm) == BFI_IOC_UNINIT) ||          \
        ((__sm) == BFI_IOC_INITING) ||          \
        ((__sm) == BFI_IOC_HWINIT) ||           \
        ((__sm) == BFI_IOC_DISABLED) ||         \
        ((__sm) == BFI_IOC_FAIL) ||             \
        ((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
                        ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                 \
                        ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)            \
                        ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)           \
                        ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)           \
                ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

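/*
 * A mailbox command is pending when either the driver-side command
 * queue is non-empty or the previous command is still owned by the
 * firmware, i.e. the h/w mailbox command register reads non-zero.
 */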
#define bfa_ioc_mbox_cmd_pending(__ioc)         \
                        (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
                        readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
                                enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
                                struct bfi_ioc_image_hdr_s *base_fwhdr,
                                struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
                                struct bfa_ioc_s *ioc,
                                struct bfi_ioc_image_hdr_s *base_fwhdr);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
        IOC_E_RESET             = 1,    /*  IOC reset request           */
        IOC_E_ENABLE            = 2,    /*  IOC enable request          */
        IOC_E_DISABLE           = 3,    /*  IOC disable request         */
        IOC_E_DETACH            = 4,    /*  driver detach cleanup       */
        IOC_E_ENABLED           = 5,    /*  f/w enabled                 */
        IOC_E_FWRSP_GETATTR     = 6,    /*  IOC get attribute response  */
        IOC_E_DISABLED          = 7,    /*  f/w disabled                */
        IOC_E_PFFAILED          = 8,    /*  failure notice by iocpf sm  */
        IOC_E_HBFAIL            = 9,    /*  heartbeat failure           */
        IOC_E_HWERROR           = 10,   /*  hardware error interrupt    */
        IOC_E_TIMEOUT           = 11,   /*  timeout                     */
        IOC_E_HWFAILED          = 12,   /*  PCI mapping failure notice  */
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
        {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
        {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
        {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
        {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
        {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
        {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)                                    \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)     bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)                               \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)                                      \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,        \
                        bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)       bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
        IOCPF_E_ENABLE          = 1,    /*  IOCPF enable request        */
        IOCPF_E_DISABLE         = 2,    /*  IOCPF disable request       */
        IOCPF_E_STOP            = 3,    /*  stop on driver detach       */
        IOCPF_E_FWREADY         = 4,    /*  f/w initialization done     */
        IOCPF_E_FWRSP_ENABLE    = 5,    /*  enable f/w response         */
        IOCPF_E_FWRSP_DISABLE   = 6,    /*  disable f/w response        */
        IOCPF_E_FAIL            = 7,    /*  failure notice by ioc sm    */
        IOCPF_E_INITFAIL        = 8,    /*  init fail notice by ioc sm  */
        IOCPF_E_GETATTRFAIL     = 9,    /*  getattr fail notice by ioc sm */
        IOCPF_E_SEMLOCKED       = 10,   /*  h/w semaphore is locked     */
        IOCPF_E_TIMEOUT         = 11,   /*  f/w response timeout        */
        IOCPF_E_SEM_ERROR       = 12,   /*  h/w sem mapping error       */
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
        BFA_IOCPF_RESET         = 1,    /*  IOC is in reset state */
        BFA_IOCPF_SEMWAIT       = 2,    /*  Waiting for IOC h/w semaphore */
        BFA_IOCPF_HWINIT        = 3,    /*  IOC h/w is being initialized */
        BFA_IOCPF_READY         = 4,    /*  IOCPF is initialized */
        BFA_IOCPF_INITFAIL      = 5,    /*  IOCPF failed */
        BFA_IOCPF_FAIL          = 6,    /*  IOCPF failed */
        BFA_IOCPF_DISABLING     = 7,    /*  IOCPF is being disabled */
        BFA_IOCPF_DISABLED      = 8,    /*  IOCPF is disabled */
        BFA_IOCPF_FWMISMATCH    = 9,    /*  IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
        {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
        {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
        {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
        {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
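
/*
 * The two tables above map each FSM handler function to the state
 * value reported outside the driver. A lookup walks the table until
 * the current handler matches; a minimal sketch, assuming the
 * bfa_sm_to_state() helper declared in bfa_cs.h:
 *
 *      enum bfa_ioc_state ioc_st =
 *              bfa_sm_to_state(ioc_sm_table, (bfa_sm_t)ioc->fsm);
 */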

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_RESET:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
        bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_disable_comp(ioc);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_timer_start(ioc);
        bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_timer_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
        bfa_ioc_hb_monitor(ioc);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_hb_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_hb_timer_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_HBFAIL:
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

                bfa_ioc_fail_notify(ioc);

                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_DISABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_HWERROR:
                /*
                 * No state change.  Will move to disabled state
                 * after iocpf sm completes failure processing and
                 * moves to disabled state.
                 */
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;

        case IOC_E_HWFAILED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                bfa_ioc_disable_comp(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                /*
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        case IOC_E_HWERROR:
        case IOC_E_HWFAILED:
                /*
                 * HB failure / HW error notification, ignore.
                 */
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        case IOC_E_HWERROR:
                /* Ignore - already in hwfail state */
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
        iocpf->fw_mismatch_notified = BFA_FALSE;
        iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_STOP:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
        struct bfi_ioc_image_hdr_s      fwhdr;
        u32     r32, fwstate, pgnum, pgoff, loff = 0;
        int     i;

        /*
         * Spin on init semaphore to serialize.
         */
        r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
        while (r32 & 0x1) {
                udelay(20);
                r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
        }

        /* h/w sem init */
        fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
        if (fwstate == BFI_IOC_UNINIT) {
                writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
                goto sem_get;
        }

        bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

        if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
                writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
                goto sem_get;
        }

        /*
         * Clear fwver hdr
         */
        pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
        pgoff = PSS_SMEM_PGOFF(loff);
        writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
                bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
                loff += sizeof(u32);
        }

        bfa_trc(iocpf->ioc, fwstate);
        bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
        bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
        bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);

        /*
         * Unlock the hw semaphore. Should be here only once per boot.
         */
        bfa_ioc_ownership_reset(iocpf->ioc);

        /*
         * unlock init semaphore.
         */
        writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
                                bfa_ioc_firmware_unlock(ioc);
                                writel(1, ioc->ioc_regs.ioc_sem_reg);
                                bfa_sem_timer_start(ioc);
                        }
                } else {
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_fsm_send_event(ioc, IOC_E_DISABLED);
                break;

        case IOCPF_E_STOP:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
        /*
         * Call only the first time sm enters fwmismatch state.
         */
        if (iocpf->fw_mismatch_notified == BFA_FALSE)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);

        iocpf->fw_mismatch_notified = BFA_TRUE;
        bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_TIMEOUT:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_fsm_send_event(ioc, IOC_E_DISABLED);
                break;

        case IOCPF_E_STOP:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_sync_complete(ioc)) {
                        bfa_ioc_sync_join(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                } else {
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_sem_timer_start(ioc);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
        iocpf->poll_time = 0;
        bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWREADY:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
                break;

        case IOCPF_E_TIMEOUT:
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_ioc_sync_leave(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_iocpf_timer_start(iocpf->ioc);
        /*
         * Enable Interrupts before sending fw IOC ENABLE cmd.
         */
        iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWRSP_ENABLE:
                bfa_iocpf_timer_stop(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
                break;

        case IOCPF_E_INITFAIL:
                bfa_iocpf_timer_stop(ioc);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                if (event == IOCPF_E_TIMEOUT)
                        bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        case IOCPF_E_GETATTRFAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_iocpf_timer_start(iocpf->ioc);
        bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_iocpf_timer_stop(ioc);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FWRSP_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_leave(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_mbox_flush(iocpf->ioc);
        bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_notify_fail(ioc);
                bfa_ioc_sync_leave(ioc);
                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_STOP:
                bfa_sem_timer_stop(ioc);
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
        /*
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(iocpf->ioc);

        /*
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_flush(iocpf->ioc);

        bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        bfa_ioc_sync_leave(ioc);
                        bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
                        if (bfa_ioc_sync_complete(ioc))
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        else {
                                writel(1, ioc->ioc_regs.ioc_sem_reg);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                        }
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 *  BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
        struct bfa_ioc_notify_s *notify;
        struct list_head        *qe;

        list_for_each(qe, &ioc->notify_q) {
                notify = (struct bfa_ioc_notify_s *)qe;
                notify->cbfn(notify->cbarg, event);
        }
}
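
/*
 * Modules receive these events by queueing a struct bfa_ioc_notify_s
 * on ioc->notify_q before the event fires. A minimal sketch (assuming
 * the bfa_ioc_notify_init() helper from bfa_ioc.h; my_cbfn/my_cbarg
 * are placeholder names):
 *
 *      bfa_ioc_notify_init(&notify, my_cbfn, my_cbarg);
 *      list_add_tail(&notify.qe, &ioc->notify_q);
 */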

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
        ioc->cbfn->disable_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
        u32 r32;
        int cnt = 0;
#define BFA_SEM_SPINCNT 3000

        r32 = readl(sem_reg);

        while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
                udelay(2);
                r32 = readl(sem_reg);
        }

        if (!(r32 & 1))
                return BFA_TRUE;

        return BFA_FALSE;
}
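
/*
 * Worst case, bfa_ioc_sem_get() above busy-waits for BFA_SEM_SPINCNT
 * (3000) iterations of a 2 usec delay, i.e. roughly 6 msec, before
 * giving up. A read that returns 0 in bit 0 means the semaphore was
 * acquired.
 */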

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
        u32     r32;

        /*
         * First read to the semaphore register will return 0, subsequent reads
         * will return 1. Semaphore is released by writing 1 to the register.
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == ~0) {
                WARN_ON(r32 == ~0);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
                return;
        }
        if (!(r32 & 1)) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }

        bfa_sem_timer_start(ioc);
}
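
/*
 * The h/w semaphore is "read to lock": the first read returns 0 and
 * grants ownership; subsequent reads return 1 until the owner releases
 * it by writing 1 back. A read of all ones (~0) means the register
 * could not be read (typically a dead PCI link) and is surfaced as
 * IOCPF_E_SEM_ERROR above. Release sketch:
 *
 *      writel(1, ioc->ioc_regs.ioc_sem_reg);
 */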

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;
        int     i;
#define PSS_LMEM_INIT_TIME  10000

        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LMEM_RESET;
        pss_ctl |= __PSS_LMEM_INIT_EN;

        /*
         * I2C workaround: 12.5 kHz clock
         */
        pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

        /*
         * wait for memory initialization to be complete
         */
        i = 0;
        do {
                pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
                i++;
        } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

        /*
         * If memory initialization is not successful, IOC timeout will catch
         * such failures.
         */
        WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
        bfa_trc(ioc, pss_ctl);

        pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;

        /*
         * Take processor out of reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LPU0_RESET;

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;

        /*
         * Put processors in reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
        u32     pgnum, pgoff;
        u32     loff = 0;
        int     i;
        u32     *fwsig = (u32 *) fwhdr;

        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
        pgoff = PSS_SMEM_PGOFF(loff);
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
             i++) {
                fwsig[i] =
                        bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
                loff += sizeof(u32);
        }
}

/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
                struct bfi_ioc_image_hdr_s *smem_fwhdr)
{
        struct bfi_ioc_image_hdr_s *drv_fwhdr;
        enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;

        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
                bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

        /*
         * If smem is incompatible or old, driver should not work with it.
         */
        drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
        if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
                drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
                return BFA_FALSE;
        }

        /*
         * If flash has a better f/w than smem, do not work with smem.
         * If smem f/w == flash f/w, work with it (smem f/w is not old or
         * incompatible at this point).
         * If flash is old or incompatible, work with smem only if
         * smem f/w == drv f/w.
         */
        smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);

        if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
                return BFA_FALSE;
        } else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
                return BFA_TRUE;
        } else {
                return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
                        BFA_TRUE : BFA_FALSE;
        }
}
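
/*
 * Decision summary for bfa_ioc_fwver_cmp() (D = driver image,
 * S = smem image, F = flash image):
 *
 *      S older or incompatible vs. D   -> reject S
 *      F better than S                 -> reject S
 *      F same as S                     -> accept S
 *      F old or incompatible           -> accept S only if S == D
 */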

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
        struct bfi_ioc_image_hdr_s fwhdr;

        bfa_ioc_fwver_get(ioc, &fwhdr);

        if (swab32(fwhdr.bootenv) != boot_env) {
                bfa_trc(ioc, fwhdr.bootenv);
                bfa_trc(ioc, boot_env);
                return BFA_FALSE;
        }

        return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

static bfa_boolean_t
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
                                struct bfi_ioc_image_hdr_s *fwhdr_2)
{
        int i;

        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
                if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
                        return BFA_FALSE;

        return BFA_TRUE;
}

/*
 * Returns TRUE if major, minor and maintenance versions are the same.
 * If patch, phase and build are also the same, the MD5 checksums must
 * match as well.
 */
static bfa_boolean_t
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
                                struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
        if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
                return BFA_FALSE;

        if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
                return BFA_FALSE;

        if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
                return BFA_FALSE;

        if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
                return BFA_FALSE;

        if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
                drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
                drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
                return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
        }

        return BFA_TRUE;
}

static bfa_boolean_t
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
{
        if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
                return BFA_FALSE;

        return BFA_TRUE;
}

static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
{
        if (fwhdr->fwver.phase == 0 &&
                fwhdr->fwver.build == 0)
                return BFA_TRUE;

        return BFA_FALSE;
}

/*
 * Compares fwhdr_to_cmp against base_fwhdr and returns whether it is
 * incompatible, older, the same, or better (e.g. a newer patch level).
 */
static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
                                struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
        if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
                return BFI_IOC_IMG_VER_INCOMP;

        if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
                return BFI_IOC_IMG_VER_BETTER;
        else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
                return BFI_IOC_IMG_VER_OLD;

        /*
         * GA takes priority over internal builds of the same patch stream.
         * At this point major, minor, maint and patch numbers are the same.
         */
        if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
                if (fwhdr_is_ga(fwhdr_to_cmp))
                        return BFI_IOC_IMG_VER_SAME;
                else
                        return BFI_IOC_IMG_VER_OLD;
        } else {
                if (fwhdr_is_ga(fwhdr_to_cmp))
                        return BFI_IOC_IMG_VER_BETTER;
        }

        if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
                return BFI_IOC_IMG_VER_BETTER;
        else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
                return BFI_IOC_IMG_VER_OLD;

        if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
                return BFI_IOC_IMG_VER_BETTER;
        else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
                return BFI_IOC_IMG_VER_OLD;

        /*
         * All version numbers are equal.
         * The MD5 check was already done as part of the compatibility check.
         */
        return BFI_IOC_IMG_VER_SAME;
}
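
/*
 * Precedence used by the comparison above: signature, major, minor
 * and maint must match (else INCOMP); then the patch level decides;
 * a GA build (phase == 0, build == 0) outranks internal builds of the
 * same patch; then phase, then build. A full tie is VER_SAME, with
 * matching MD5 sums already enforced by bfa_ioc_fw_ver_compatible().
 */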

#define BFA_FLASH_PART_FWIMG_ADDR       0x100000 /* fw image address */

bfa_status_t
bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
                                u32 *fwimg)
{
        return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
                        BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
                        (char *)fwimg, BFI_FLASH_CHUNK_SZ);
}
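
/*
 * 'off' above is a word offset into the flash firmware image: the
 * byte address read is BFA_FLASH_PART_FWIMG_ADDR + off * sizeof(u32),
 * and each call returns one BFI_FLASH_CHUNK_SZ-byte chunk. Reading
 * chunk 0 is enough to recover the image header for the version
 * comparison below.
 */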
1649
1650static enum bfi_ioc_img_ver_cmp_e
1651bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
1652                        struct bfi_ioc_image_hdr_s *base_fwhdr)
1653{
1654        struct bfi_ioc_image_hdr_s *flash_fwhdr;
1655        bfa_status_t status;
1656        u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
1657
1658        status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
1659        if (status != BFA_STATUS_OK)
1660                return BFI_IOC_IMG_VER_INCOMP;
1661
1662        flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
1663        if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
1664                return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
1665        else
1666                return BFI_IOC_IMG_VER_INCOMP;
1667}
1668
1669
1670/*
1671 * Invalidate the fwver signature so the next IOC init re-downloads firmware.
1672 */
1673bfa_status_t
1674bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
1675{
1676        u32     pgnum;
1677        u32     loff = 0;
1678        enum bfi_ioc_state ioc_fwstate;
1679
1680        ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1681        if (!bfa_ioc_state_disabled(ioc_fwstate))
1682                return BFA_STATUS_ADAPTER_ENABLED;
1683
1684        /* overwrite the fw signature word at smem offset 0 */
1685        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1686        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1687        bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1688                      BFA_IOC_FW_INV_SIGN);
1689
1690        return BFA_STATUS_OK;
1691}
1692
1693/*
1694 * Conditionally flush any pending message from firmware at start.
1695 */
1696static void
1697bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1698{
1699        u32     r32;
1700
1701        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1702        if (r32)
1703                writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1704}
1705
1706static void
1707bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1708{
1709        enum bfi_ioc_state ioc_fwstate;
1710        bfa_boolean_t fwvalid;
1711        u32 boot_type;
1712        u32 boot_env;
1713
1714        ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1715
1716        if (force)
1717                ioc_fwstate = BFI_IOC_UNINIT;
1718
1719        bfa_trc(ioc, ioc_fwstate);
1720
1721        boot_type = BFI_FWBOOT_TYPE_NORMAL;
1722        boot_env = BFI_FWBOOT_ENV_OS;
1723
1724        /*
1725         * check if firmware is valid
1726         */
1727        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1728                BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1729
1730        if (!fwvalid) {
1731                if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
1732                        bfa_ioc_poll_fwinit(ioc);
1733                return;
1734        }
1735
1736        /*
1737         * If hardware initialization is in progress (initialized by other IOC),
1738         * just wait for an initialization completion interrupt.
1739         */
1740        if (ioc_fwstate == BFI_IOC_INITING) {
1741                bfa_ioc_poll_fwinit(ioc);
1742                return;
1743        }
1744
1745        /*
1746         * If the IOC function is disabled and the firmware version is
1747         * the same, just re-enable the IOC.
1748         *
1749         * With an option ROM the IOC should not be operational; with
1750         * converged adapters it is already operational when the second
1751         * driver instance loads.
1752         */
1753        if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1754
1755                /*
1756                 * When using MSI-X any pending firmware ready event should
1757                 * be flushed. Otherwise MSI-X interrupts are not delivered.
1758                 */
1759                bfa_ioc_msgflush(ioc);
1760                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1761                return;
1762        }
1763
1764        /*
1765         * Initialize the h/w for any other states.
1766         */
1767        if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
1768                bfa_ioc_poll_fwinit(ioc);
1769}
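
/*
 * Decision summary for bfa_ioc_hwinit() above (informational):
 *
 *	fwstate seen             fw valid?   action taken
 *	----------------------   ---------   ----------------------------
 *	UNINIT (or force)        no          boot fw, poll for fw init
 *	any, version mismatch    no          boot fw, poll for fw init
 *	INITING                  yes         poll (other fn is booting)
 *	DISABLED or OP           yes         flush mbox, send FWREADY
 *	all other states         yes         boot fw, poll for fw init
 */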
1770
1771static void
1772bfa_ioc_timeout(void *ioc_arg)
1773{
1774        struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
1775
1776        bfa_trc(ioc, 0);
1777        bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1778}
1779
1780void
1781bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1782{
1783        u32 *msgp = (u32 *) ioc_msg;
1784        u32 i;
1785
1786        bfa_trc(ioc, msgp[0]);
1787        bfa_trc(ioc, len);
1788
1789        WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1790
1791        /*
1792         * first write msg to mailbox registers
1793         */
1794        for (i = 0; i < len / sizeof(u32); i++)
1795                writel(cpu_to_le32(msgp[i]),
1796                        ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1797
1798        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1799                writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1800
1801        /*
1802         * write 1 to mailbox CMD to trigger LPU event
1803         */
1804        writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1805        (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1806}
1807
1808static void
1809bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1810{
1811        struct bfi_ioc_ctrl_req_s enable_req;
1812
1813        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1814                    bfa_ioc_portid(ioc));
1815        enable_req.clscode = cpu_to_be16(ioc->clscode);
1816        /* unsigned 32-bit time_t overflow in y2106 */
1817        enable_req.tv_sec = cpu_to_be32(ktime_get_real_seconds());
1818        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1819}
1820
1821static void
1822bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1823{
1824        struct bfi_ioc_ctrl_req_s disable_req;
1825
1826        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1827                    bfa_ioc_portid(ioc));
1828        disable_req.clscode = cpu_to_be16(ioc->clscode);
1829        /* unsigned 32-bit time_t overflow in y2106 */
1830        disable_req.tv_sec = cpu_to_be32(ktime_get_real_seconds());
1831        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1832}
1833
1834static void
1835bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1836{
1837        struct bfi_ioc_getattr_req_s    attr_req;
1838
1839        bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1840                    bfa_ioc_portid(ioc));
1841        bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1842        bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1843}
1844
1845static void
1846bfa_ioc_hb_check(void *cbarg)
1847{
1848        struct bfa_ioc_s  *ioc = cbarg;
1849        u32     hb_count;
1850
1851        hb_count = readl(ioc->ioc_regs.heartbeat);
1852        if (ioc->hb_count == hb_count) {
1853                bfa_ioc_recover(ioc);
1854                return;
1855        }
1856
1857        ioc->hb_count = hb_count;
1858
1859        bfa_ioc_mbox_poll(ioc);
1860        bfa_hb_timer_start(ioc);
1861}
1862
1863static void
1864bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1865{
1866        ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1867        bfa_hb_timer_start(ioc);
1868}
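
/*
 * Heartbeat sketch: the firmware increments the heartbeat register
 * continuously; each timer tick bfa_ioc_hb_check() compares it with the
 * last saved count, e.g.
 *
 *	hb_count = readl(ioc->ioc_regs.heartbeat);
 *	if (ioc->hb_count == hb_count)
 *		bfa_ioc_recover(ioc);		stalled count == fw failure
 *	else
 *		bfa_hb_timer_start(ioc);	re-arm and keep monitoring
 *
 * so a hung firmware is detected within about one heartbeat period.
 */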
1869
1870/*
1871 *      Initiate a full firmware download.
1872 */
1873static bfa_status_t
1874bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1875                    u32 boot_env)
1876{
1877        u32 *fwimg;
1878        u32 pgnum;
1879        u32 loff = 0;
1880        u32 chunkno = 0;
1881        u32 i;
1882        u32 asicmode;
1883        u32 fwimg_size;
1884        u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
1885        bfa_status_t status;
1886
1887        if (boot_env == BFI_FWBOOT_ENV_OS &&
1888                boot_type == BFI_FWBOOT_TYPE_FLASH) {
1889                fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
1890
1891                status = bfa_ioc_flash_img_get_chnk(ioc,
1892                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
1893                if (status != BFA_STATUS_OK)
1894                        return status;
1895
1896                fwimg = fwimg_buf;
1897        } else {
1898                fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
1899                fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1900                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1901        }
1902
1903        bfa_trc(ioc, fwimg_size);
1904
1905        /*
1906         * Select the first smem page before streaming the image.
1907         */
1908        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1909        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1910
1911        for (i = 0; i < fwimg_size; i++) {
1912
1913                if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1914                        chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1915
1916                        if (boot_env == BFI_FWBOOT_ENV_OS &&
1917                                boot_type == BFI_FWBOOT_TYPE_FLASH) {
1918                                status = bfa_ioc_flash_img_get_chnk(ioc,
1919                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
1920                                        fwimg_buf);
1921                                if (status != BFA_STATUS_OK)
1922                                        return status;
1923
1924                                fwimg = fwimg_buf;
1925                        } else {
1926                                fwimg = bfa_cb_image_get_chunk(
1927                                        bfa_ioc_asic_gen(ioc),
1928                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1929                        }
1930                }
1931
1932                /*
1933                 * write smem
1934                 */
1935                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1936                              fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1937
1938                loff += sizeof(u32);
1939
1940                /*
1941                 * handle page offset wrap around
1942                 */
1943                loff = PSS_SMEM_PGOFF(loff);
1944                if (loff == 0) {
1945                        pgnum++;
1946                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1947                }
1948        }
1949
1950        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1951                        ioc->ioc_regs.host_page_num_fn);
1952
1953        /*
1954         * Set boot type, env and device mode at the end.
1955         */
1956        if (boot_env == BFI_FWBOOT_ENV_OS &&
1957                boot_type == BFI_FWBOOT_TYPE_FLASH) {
1958                boot_type = BFI_FWBOOT_TYPE_NORMAL;
1959        }
1960        asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1961                                ioc->port0_mode, ioc->port1_mode);
1962        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1963                        swab32(asicmode));
1964        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1965                        swab32(boot_type));
1966        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1967                        swab32(boot_env));
1968        return BFA_STATUS_OK;
1969}
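
/*
 * SMEM paging sketch for the download loop above: the image is fetched
 * BFI_FLASH_CHUNK_SZ_WORDS at a time and written word-by-word; when the
 * page offset wraps to zero, the host page-number register is advanced:
 *
 *	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
 *		      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
 *	loff = PSS_SMEM_PGOFF(loff + sizeof(u32));
 *	if (loff == 0)
 *		writel(++pgnum, ioc->ioc_regs.host_page_num_fn);
 */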
1970
1971
1972/*
1973 * Update BFA configuration from firmware configuration.
1974 */
1975static void
1976bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1977{
1978        struct bfi_ioc_attr_s   *attr = ioc->attr;
1979
1980        attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1981        attr->card_type     = be32_to_cpu(attr->card_type);
1982        attr->maxfrsize     = be16_to_cpu(attr->maxfrsize);
1983        ioc->fcmode     = (attr->port_mode == BFI_PORT_MODE_FC);
1984        attr->mfg_year  = be16_to_cpu(attr->mfg_year);
1985
1986        bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1987}
1988
1989/*
1990 * Attach time initialization of mbox logic.
1991 */
1992static void
1993bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1994{
1995        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1996        int     mc;
1997
1998        INIT_LIST_HEAD(&mod->cmd_q);
1999        for (mc = 0; mc < BFI_MC_MAX; mc++) {
2000                mod->mbhdlr[mc].cbfn = NULL;
2001                mod->mbhdlr[mc].cbarg = ioc->bfa;
2002        }
2003}
2004
2005/*
2006 * Mbox poll -- send the next pending mailbox request to firmware, if any.
2007 */
2008static void
2009bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
2010{
2011        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2012        struct bfa_mbox_cmd_s           *cmd;
2013        u32                     stat;
2014
2015        /*
2016         * If no command pending, do nothing
2017         */
2018        if (list_empty(&mod->cmd_q))
2019                return;
2020
2021        /*
2022         * If previous command is not yet fetched by firmware, do nothing
2023         */
2024        stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2025        if (stat)
2026                return;
2027
2028        /*
2029         * Dequeue the next command and write it to firmware.
2030         */
2031        bfa_q_deq(&mod->cmd_q, &cmd);
2032        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2033}
2034
2035/*
2036 * Cleanup any pending requests.
2037 */
2038static void
2039bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
2040{
2041        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2042        struct bfa_mbox_cmd_s           *cmd;
2043
2044        while (!list_empty(&mod->cmd_q))
2045                bfa_q_deq(&mod->cmd_q, &cmd);
2046}
2047
2048/*
2049 * Read data from SMEM to host through PCI memmap
2050 *
2051 * @param[in]   ioc     memory for IOC
2052 * @param[out]  tbuf    app memory to store data from smem
2053 * @param[in]   soff    smem offset
2054 * @param[in]   sz      number of bytes to read
2055 */
2056static bfa_status_t
2057bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
2058{
2059        u32 pgnum, loff;
2060        __be32 r32;
2061        int i, len;
2062        u32 *buf = tbuf;
2063
2064        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2065        loff = PSS_SMEM_PGOFF(soff);
2066        bfa_trc(ioc, pgnum);
2067        bfa_trc(ioc, loff);
2068        bfa_trc(ioc, sz);
2069
2070        /*
2071         *  Hold semaphore to serialize pll init and fwtrc.
2072         */
2073        if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2074                bfa_trc(ioc, 0);
2075                return BFA_STATUS_FAILED;
2076        }
2077
2078        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2079
2080        len = sz/sizeof(u32);
2081        bfa_trc(ioc, len);
2082        for (i = 0; i < len; i++) {
2083                r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2084                buf[i] = swab32(r32);
2085                loff += sizeof(u32);
2086
2087                /*
2088                 * handle page offset wrap around
2089                 */
2090                loff = PSS_SMEM_PGOFF(loff);
2091                if (loff == 0) {
2092                        pgnum++;
2093                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2094                }
2095        }
2096        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2097                        ioc->ioc_regs.host_page_num_fn);
2098        /*
2099         *  release semaphore.
2100         */
2101        readl(ioc->ioc_regs.ioc_init_sem_reg);
2102        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2103
2104        bfa_trc(ioc, pgnum);
2105        return BFA_STATUS_OK;
2106}
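
/*
 * Usage sketch (mirrors bfa_ioc_debug_fwtrc() below): read this port's
 * firmware trace region into a caller supplied buffer:
 *
 *	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
 *
 *	status = bfa_ioc_smem_read(ioc, trcdata, loff, BFA_DBG_FWTRC_LEN);
 */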
2107
2108/*
2109 * Clear SMEM data from host through PCI memmap
2110 *
2111 * @param[in]   ioc     memory for IOC
2112 * @param[in]   soff    smem offset
2113 * @param[in]   sz      number of bytes to clear
2114 */
2115static bfa_status_t
2116bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
2117{
2118        int i, len;
2119        u32 pgnum, loff;
2120
2121        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2122        loff = PSS_SMEM_PGOFF(soff);
2123        bfa_trc(ioc, pgnum);
2124        bfa_trc(ioc, loff);
2125        bfa_trc(ioc, sz);
2126
2127        /*
2128         *  Hold semaphore to serialize pll init and fwtrc.
2129         */
2130        if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2131                bfa_trc(ioc, 0);
2132                return BFA_STATUS_FAILED;
2133        }
2134
2135        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2136
2137        len = sz/sizeof(u32); /* len in words */
2138        bfa_trc(ioc, len);
2139        for (i = 0; i < len; i++) {
2140                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
2141                loff += sizeof(u32);
2142
2143                /*
2144                 * handle page offset wrap around
2145                 */
2146                loff = PSS_SMEM_PGOFF(loff);
2147                if (loff == 0) {
2148                        pgnum++;
2149                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2150                }
2151        }
2152        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2153                        ioc->ioc_regs.host_page_num_fn);
2154
2155        /*
2156         *  release semaphore.
2157         */
2158        readl(ioc->ioc_regs.ioc_init_sem_reg);
2159        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2160        bfa_trc(ioc, pgnum);
2161        return BFA_STATUS_OK;
2162}
2163
2164static void
2165bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
2166{
2167        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2168
2169        /*
2170         * Notify driver and common modules registered for notification.
2171         */
2172        ioc->cbfn->hbfail_cbfn(ioc->bfa);
2173        bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2174
2175        bfa_ioc_debug_save_ftrc(ioc);
2176
2177        BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
2178                "Heart Beat of IOC has failed\n");
2179        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
2180}
2182
2183static void
2184bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
2185{
2186        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2187        /*
2188         * Provide enable completion callback.
2189         */
2190        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2191        BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
2192                "Running firmware version is incompatible "
2193                "with the driver version\n");
2194        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
2195}
2196
2197bfa_status_t
2198bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
2199{
2201        /*
2202         *  Hold semaphore so that nobody can access the chip during init.
2203         */
2204        bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2205
2206        bfa_ioc_pll_init_asic(ioc);
2207
2208        ioc->pllinit = BFA_TRUE;
2209
2210        /*
2211         * Initialize LMEM
2212         */
2213        bfa_ioc_lmem_init(ioc);
2214
2215        /*
2216         *  release semaphore.
2217         */
2218        readl(ioc->ioc_regs.ioc_init_sem_reg);
2219        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2220
2221        return BFA_STATUS_OK;
2222}
2223
2224/*
2225 * Boot the firmware. Also used by the diag module to boot with the
2226 * memory test image as the entry vector.
2227 */
2228bfa_status_t
2229bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2230{
2231        struct bfi_ioc_image_hdr_s *drv_fwhdr;
2232        bfa_status_t status;
2233        bfa_ioc_stats(ioc, ioc_boots);
2234
2235        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2236                return BFA_STATUS_FAILED;
2237
2238        if (boot_env == BFI_FWBOOT_ENV_OS &&
2239                boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2240
2241                drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
2242                        bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2243
2244                /*
2245                 * Boot from flash only if the flash f/w is newer than
2246                 * the driver f/w; otherwise push the driver's firmware.
2247                 */
2248                if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2249                                                BFI_IOC_IMG_VER_BETTER)
2250                        boot_type = BFI_FWBOOT_TYPE_FLASH;
2251        }
2252
2253        /*
2254         * Initialize IOC state of all functions on a chip reset.
2255         */
2256        if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2257                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2258                bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2259        } else {
2260                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2261                bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2262        }
2263
2264        bfa_ioc_msgflush(ioc);
2265        status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2266        if (status == BFA_STATUS_OK)
2267                bfa_ioc_lpu_start(ioc);
2268        else {
2269                WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
2270                bfa_iocpf_timeout(ioc);
2271        }
2272        return status;
2273}
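
/*
 * Typical invocation (as in bfa_ioc_hwinit() above):
 *
 *	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL,
 *			 BFI_FWBOOT_ENV_OS) == BFA_STATUS_OK)
 *		bfa_ioc_poll_fwinit(ioc);
 *
 * The diag module passes BFI_FWBOOT_TYPE_MEMTEST instead to boot with
 * the memory test entry vector.
 */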
2274
2275/*
2276 * Enable/disable IOC failure auto recovery.
2277 */
2278void
2279bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2280{
2281        bfa_auto_recover = auto_recover;
2282}
2283
2284
2285
2286bfa_boolean_t
2287bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2288{
2289        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2290}
2291
2292bfa_boolean_t
2293bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2294{
2295        u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
2296
2297        return ((r32 != BFI_IOC_UNINIT) &&
2298                (r32 != BFI_IOC_INITING) &&
2299                (r32 != BFI_IOC_MEMTEST));
2300}
2301
2302bfa_boolean_t
2303bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2304{
2305        __be32  *msgp = mbmsg;
2306        u32     r32;
2307        int             i;
2308
2309        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2310        if ((r32 & 1) == 0)
2311                return BFA_FALSE;
2312
2313        /*
2314         * read the MBOX msg
2315         */
2316        for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2317             i++) {
2318                r32 = readl(ioc->ioc_regs.lpu_mbox +
2319                                   i * sizeof(u32));
2320                msgp[i] = cpu_to_be32(r32);
2321        }
2322
2323        /*
2324         * turn off mailbox interrupt by clearing mailbox status
2325         */
2326        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2327        readl(ioc->ioc_regs.lpu_mbox_cmd);
2328
2329        return BFA_TRUE;
2330}
2331
2332void
2333bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2334{
2335        union bfi_ioc_i2h_msg_u *msg;
2336        struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2337
2338        msg = (union bfi_ioc_i2h_msg_u *) m;
2339
2340        bfa_ioc_stats(ioc, ioc_isrs);
2341
2342        switch (msg->mh.msg_id) {
2343        case BFI_IOC_I2H_HBEAT:
2344                break;
2345
2346        case BFI_IOC_I2H_ENABLE_REPLY:
2347                ioc->port_mode = ioc->port_mode_cfg =
2348                                (enum bfa_mode_s)msg->fw_event.port_mode;
2349                ioc->ad_cap_bm = msg->fw_event.cap_bm;
2350                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2351                break;
2352
2353        case BFI_IOC_I2H_DISABLE_REPLY:
2354                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2355                break;
2356
2357        case BFI_IOC_I2H_GETATTR_REPLY:
2358                bfa_ioc_getattr_reply(ioc);
2359                break;
2360
2361        default:
2362                bfa_trc(ioc, msg->mh.msg_id);
2363                WARN_ON(1);
2364        }
2365}
2366
2367/*
2368 * IOC attach time initialization and setup.
2369 *
2370 * @param[in]   ioc     memory for IOC
2371 * @param[in]   bfa     driver instance structure
2372 */
2373void
2374bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2375               struct bfa_timer_mod_s *timer_mod)
2376{
2377        ioc->bfa        = bfa;
2378        ioc->cbfn       = cbfn;
2379        ioc->timer_mod  = timer_mod;
2380        ioc->fcmode     = BFA_FALSE;
2381        ioc->pllinit    = BFA_FALSE;
2382        ioc->dbg_fwsave_once = BFA_TRUE;
2383        ioc->iocpf.ioc  = ioc;
2384
2385        bfa_ioc_mbox_attach(ioc);
2386        INIT_LIST_HEAD(&ioc->notify_q);
2387
2388        bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2389        bfa_fsm_send_event(ioc, IOC_E_RESET);
2390}
2391
2392/*
2393 * Driver detach time IOC cleanup.
2394 */
2395void
2396bfa_ioc_detach(struct bfa_ioc_s *ioc)
2397{
2398        bfa_fsm_send_event(ioc, IOC_E_DETACH);
2399        INIT_LIST_HEAD(&ioc->notify_q);
2400}
2401
2402/*
2403 * Setup IOC PCI properties.
2404 *
2405 * @param[in]   pcidev  PCI device information for this IOC
2406 */
2407void
2408bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2409                enum bfi_pcifn_class clscode)
2410{
2411        ioc->clscode    = clscode;
2412        ioc->pcidev     = *pcidev;
2413
2414        /*
2415         * Initialize IOC and device personality
2416         */
2417        ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2418        ioc->asic_mode  = BFI_ASIC_MODE_FC;
2419
2420        switch (pcidev->device_id) {
2421        case BFA_PCI_DEVICE_ID_FC_8G1P:
2422        case BFA_PCI_DEVICE_ID_FC_8G2P:
2423                ioc->asic_gen = BFI_ASIC_GEN_CB;
2424                ioc->fcmode = BFA_TRUE;
2425                ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2426                ioc->ad_cap_bm = BFA_CM_HBA;
2427                break;
2428
2429        case BFA_PCI_DEVICE_ID_CT:
2430                ioc->asic_gen = BFI_ASIC_GEN_CT;
2431                ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2432                ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2433                ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2434                ioc->ad_cap_bm = BFA_CM_CNA;
2435                break;
2436
2437        case BFA_PCI_DEVICE_ID_CT_FC:
2438                ioc->asic_gen = BFI_ASIC_GEN_CT;
2439                ioc->fcmode = BFA_TRUE;
2440                ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2441                ioc->ad_cap_bm = BFA_CM_HBA;
2442                break;
2443
2444        case BFA_PCI_DEVICE_ID_CT2:
2445        case BFA_PCI_DEVICE_ID_CT2_QUAD:
2446                ioc->asic_gen = BFI_ASIC_GEN_CT2;
2447                if (clscode == BFI_PCIFN_CLASS_FC &&
2448                    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2449                        ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2450                        ioc->fcmode = BFA_TRUE;
2451                        ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2452                        ioc->ad_cap_bm = BFA_CM_HBA;
2453                } else {
2454                        ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2455                        ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2456                        if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2457                                ioc->port_mode =
2458                                ioc->port_mode_cfg = BFA_MODE_CNA;
2459                                ioc->ad_cap_bm = BFA_CM_CNA;
2460                        } else {
2461                                ioc->port_mode =
2462                                ioc->port_mode_cfg = BFA_MODE_NIC;
2463                                ioc->ad_cap_bm = BFA_CM_NIC;
2464                        }
2465                }
2466                break;
2467
2468        default:
2469                WARN_ON(1);
2470        }
2471
2472        /*
2473         * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2474         */
2475        if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2476                bfa_ioc_set_cb_hwif(ioc);
2477        else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2478                bfa_ioc_set_ct_hwif(ioc);
2479        else {
2480                WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2481                bfa_ioc_set_ct2_hwif(ioc);
2482                bfa_ioc_ct2_poweron(ioc);
2483        }
2484
2485        bfa_ioc_map_port(ioc);
2486        bfa_ioc_reg_init(ioc);
2487}
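
/*
 * Personality selection above, summarized (informational):
 *
 *	device id               asic gen   default personality
 *	---------------------   --------   -------------------------------
 *	FC_8G1P, FC_8G2P        CB         HBA (FC)
 *	CT                      CT         CNA (Ethernet ports)
 *	CT_FC                   CT         HBA (FC)
 *	CT2, CT2_QUAD           CT2        HBA, CNA or NIC by class/ssid
 */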
2488
2489/*
2490 * Initialize IOC dma memory
2491 *
2492 * @param[in]   dm_kva  kernel virtual address of IOC dma memory
2493 * @param[in]   dm_pa   physical address of IOC dma memory
2494 */
2495void
2496bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2497{
2498        /*
2499         * dma memory for firmware attribute
2500         */
2501        ioc->attr_dma.kva = dm_kva;
2502        ioc->attr_dma.pa = dm_pa;
2503        ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2504}
2505
2506void
2507bfa_ioc_enable(struct bfa_ioc_s *ioc)
2508{
2509        bfa_ioc_stats(ioc, ioc_enables);
2510        ioc->dbg_fwsave_once = BFA_TRUE;
2511
2512        bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2513}
2514
2515void
2516bfa_ioc_disable(struct bfa_ioc_s *ioc)
2517{
2518        bfa_ioc_stats(ioc, ioc_disables);
2519        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2520}
2521
2522void
2523bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2524{
2525        ioc->dbg_fwsave_once = BFA_TRUE;
2526        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2527}
2528
2529/*
2530 * Initialize memory for saving firmware trace. Driver must initialize
2531 * trace memory before calling bfa_ioc_enable().
2532 */
2533void
2534bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2535{
2536        ioc->dbg_fwsave     = dbg_fwsave;
2537        ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2538}
2539
2540/*
2541 * Register mailbox message handler functions
2542 *
2543 * @param[in]   ioc             IOC instance
2544 * @param[in]   mcfuncs         message class handler functions
2545 */
2546void
2547bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2548{
2549        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2550        int                             mc;
2551
2552        for (mc = 0; mc < BFI_MC_MAX; mc++)
2553                mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2554}
2555
2556/*
2557 * Register mailbox message handler function, to be called by common modules
2558 */
2559void
2560bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2561                    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2562{
2563        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2564
2565        mod->mbhdlr[mc].cbfn    = cbfn;
2566        mod->mbhdlr[mc].cbarg   = cbarg;
2567}
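
/*
 * Usage sketch (mirrors bfa_ablk_attach() below): a common module
 * registers its message-class handler once at attach time:
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
 */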
2568
2569/*
2570 * Queue a mailbox command request to firmware; if the mailbox is busy
2571 * the command is held and sent later. Caller must serialize.
2572 *
2573 * @param[in]   ioc     IOC instance
2574 * @param[in]   cmd     Mailbox command
2575 */
2576void
2577bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2578{
2579        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2580        u32                     stat;
2581
2582        /*
2583         * If a previous command is pending, queue new command
2584         */
2585        if (!list_empty(&mod->cmd_q)) {
2586                list_add_tail(&cmd->qe, &mod->cmd_q);
2587                return;
2588        }
2589
2590        /*
2591         * If mailbox is busy, queue command for poll timer
2592         */
2593        stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2594        if (stat) {
2595                list_add_tail(&cmd->qe, &mod->cmd_q);
2596                return;
2597        }
2598
2599        /*
2600         * mailbox is free -- write command directly to firmware
2601         */
2602        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2603}
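
/*
 * Usage sketch (see bfa_ioc_send_fwsync() below): build the request in
 * cmd.msg and queue it; delivery is retried from the poll path if the
 * mailbox is busy:
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *)cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */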
2604
2605/*
2606 * Handle mailbox interrupts
2607 */
2608void
2609bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2610{
2611        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2612        struct bfi_mbmsg_s              m;
2613        int                             mc;
2614
2615        if (bfa_ioc_msgget(ioc, &m)) {
2616                /*
2617                 * Treat IOC message class as special.
2618                 */
2619                mc = m.mh.msg_class;
2620                if (mc == BFI_MC_IOC) {
2621                        bfa_ioc_isr(ioc, &m);
2622                        return;
2623                }
2624
2625                if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2626                        return;
2627
2628                mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2629        }
2630
2631        bfa_ioc_lpu_read_stat(ioc);
2632
2633        /*
2634         * Try to send pending mailbox commands
2635         */
2636        bfa_ioc_mbox_poll(ioc);
2637}
2638
2639void
2640bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2641{
2642        bfa_ioc_stats(ioc, ioc_hbfails);
2643        ioc->stats.hb_count = ioc->hb_count;
2644        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2645}
2646
2647/*
2648 * return true if IOC is disabled
2649 */
2650bfa_boolean_t
2651bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2652{
2653        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2654                bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2655}
2656
2657/*
2658 * return true if IOC firmware is different.
2659 */
2660bfa_boolean_t
2661bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2662{
2663        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2664                bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2665                bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2666}
2667
2668/*
2669 * Check if adapter is disabled -- both IOCs should be in a disabled
2670 * state.
2671 */
2672bfa_boolean_t
2673bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2674{
2675        u32     ioc_state;
2676
2677        if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2678                return BFA_FALSE;
2679
2680        ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2681        if (!bfa_ioc_state_disabled(ioc_state))
2682                return BFA_FALSE;
2683
2684        if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2685                ioc_state = bfa_ioc_get_alt_ioc_fwstate(ioc);
2686                if (!bfa_ioc_state_disabled(ioc_state))
2687                        return BFA_FALSE;
2688        }
2689
2690        return BFA_TRUE;
2691}
2692
2693/*
2694 * Reset IOC fwstate registers.
2695 */
2696void
2697bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2698{
2699        bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2700        bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2701}
2702
2703#define BFA_MFG_NAME "QLogic"
2704void
2705bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2706                         struct bfa_adapter_attr_s *ad_attr)
2707{
2708        struct bfi_ioc_attr_s   *ioc_attr;
2709
2710        ioc_attr = ioc->attr;
2711
2712        bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2713        bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2714        bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2715        bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2716        memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2717                      sizeof(struct bfa_mfg_vpd_s));
2718
2719        ad_attr->nports = bfa_ioc_get_nports(ioc);
2720        ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2721
2722        bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2723        /* For now, model descr uses same model string */
2724        bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2725
2726        ad_attr->card_type = ioc_attr->card_type;
2727        ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2728
2729        if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2730                ad_attr->prototype = 1;
2731        else
2732                ad_attr->prototype = 0;
2733
2734        ad_attr->pwwn = ioc->attr->pwwn;
2735        ad_attr->mac  = bfa_ioc_get_mac(ioc);
2736
2737        ad_attr->pcie_gen = ioc_attr->pcie_gen;
2738        ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2739        ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2740        ad_attr->asic_rev = ioc_attr->asic_rev;
2741
2742        bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2743
2744        ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2745        ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2746                                  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2747        ad_attr->mfg_day = ioc_attr->mfg_day;
2748        ad_attr->mfg_month = ioc_attr->mfg_month;
2749        ad_attr->mfg_year = ioc_attr->mfg_year;
2750        memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2751}
2752
2753enum bfa_ioc_type_e
2754bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2755{
2756        if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2757                return BFA_IOC_TYPE_LL;
2758
2759        WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2760
2761        return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2762                ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2763}
2764
2765void
2766bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2767{
2768        memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2769        memcpy((void *)serial_num,
2770                        (void *)ioc->attr->brcd_serialnum,
2771                        BFA_ADAPTER_SERIAL_NUM_LEN);
2772}
2773
2774void
2775bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2776{
2777        memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2778        memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2779}
2780
2781void
2782bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2783{
2784        WARN_ON(!chip_rev);
2785
2786        memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2787
2788        chip_rev[0] = 'R';
2789        chip_rev[1] = 'e';
2790        chip_rev[2] = 'v';
2791        chip_rev[3] = '-';
2792        chip_rev[4] = ioc->attr->asic_rev;
2793        chip_rev[5] = '\0';
2794}
2795
2796void
2797bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2798{
2799        memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2800        memcpy(optrom_ver, ioc->attr->optrom_version,
2801                      BFA_VERSION_LEN);
2802}
2803
2804void
2805bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2806{
2807        memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2808        strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2809}
2810
2811void
2812bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2813{
2814        struct bfi_ioc_attr_s   *ioc_attr;
2815        u8 nports = bfa_ioc_get_nports(ioc);
2816
2817        WARN_ON(!model);
2818        memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2819
2820        ioc_attr = ioc->attr;
2821
2822        if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2823                (!bfa_mfg_is_mezz(ioc_attr->card_type)))
2824                snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2825                        BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2826        else
2827                snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2828                        BFA_MFG_NAME, ioc_attr->card_type);
2829}
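
/*
 * Example output (hypothetical card type): a non-mezzanine CT2 adapter
 * with card_type 1860 and two ports yields "QLogic-1860-2p"; all other
 * adapters get the shorter "QLogic-<card_type>" form.
 */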
2830
2831enum bfa_ioc_state
2832bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2833{
2834        enum bfa_iocpf_state iocpf_st;
2835        enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2836
2837        if (ioc_st == BFA_IOC_ENABLING ||
2838                ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2839
2840                iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2841
2842                switch (iocpf_st) {
2843                case BFA_IOCPF_SEMWAIT:
2844                        ioc_st = BFA_IOC_SEMWAIT;
2845                        break;
2846
2847                case BFA_IOCPF_HWINIT:
2848                        ioc_st = BFA_IOC_HWINIT;
2849                        break;
2850
2851                case BFA_IOCPF_FWMISMATCH:
2852                        ioc_st = BFA_IOC_FWMISMATCH;
2853                        break;
2854
2855                case BFA_IOCPF_FAIL:
2856                        ioc_st = BFA_IOC_FAIL;
2857                        break;
2858
2859                case BFA_IOCPF_INITFAIL:
2860                        ioc_st = BFA_IOC_INITFAIL;
2861                        break;
2862
2863                default:
2864                        break;
2865                }
2866        }
2867
2868        return ioc_st;
2869}
2870
2871void
2872bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2873{
2874        memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2875
2876        ioc_attr->state = bfa_ioc_get_state(ioc);
2877        ioc_attr->port_id = bfa_ioc_portid(ioc);
2878        ioc_attr->port_mode = ioc->port_mode;
2879        ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2880        ioc_attr->cap_bm = ioc->ad_cap_bm;
2881
2882        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2883
2884        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2885
2886        ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2887        ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2888        ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2889        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2890}
2891
2892mac_t
2893bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2894{
2895        /*
2896         * Check the IOC type and return the appropriate MAC
2897         */
2898        if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2899                return ioc->attr->fcoe_mac;
2900        else
2901                return ioc->attr->mac;
2902}
2903
2904mac_t
2905bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2906{
2907        mac_t   m;
2908
2909        m = ioc->attr->mfg_mac;
2910        if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2911                m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2912        else
2913                bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2914                        bfa_ioc_pcifn(ioc));
2915
2916        return m;
2917}
2918
2919/*
2920 * Send AEN notification
2921 */
2922void
2923bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2924{
2925        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2926        struct bfa_aen_entry_s  *aen_entry;
2927        enum bfa_ioc_type_e ioc_type;
2928
2929        bfad_get_aen_entry(bfad, aen_entry);
2930        if (!aen_entry)
2931                return;
2932
2933        ioc_type = bfa_ioc_get_type(ioc);
2934        switch (ioc_type) {
2935        case BFA_IOC_TYPE_FC:
2936                aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2937                break;
2938        case BFA_IOC_TYPE_FCoE:
2939                aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2940                aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2941                break;
2942        case BFA_IOC_TYPE_LL:
2943                aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2944                break;
2945        default:
2946                WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2947                break;
2948        }
2949
2950        /* Send the AEN notification */
2951        aen_entry->aen_data.ioc.ioc_type = ioc_type;
2952        bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2953                                  BFA_AEN_CAT_IOC, event);
2954}
2955
2956/*
2957 * Retrieve saved firmware trace from a prior IOC failure.
2958 */
2959bfa_status_t
2960bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2961{
2962        int     tlen;
2963
2964        if (ioc->dbg_fwsave_len == 0)
2965                return BFA_STATUS_ENOFSAVE;
2966
2967        tlen = *trclen;
2968        if (tlen > ioc->dbg_fwsave_len)
2969                tlen = ioc->dbg_fwsave_len;
2970
2971        memcpy(trcdata, ioc->dbg_fwsave, tlen);
2972        *trclen = tlen;
2973        return BFA_STATUS_OK;
2974}
2975
2976
2977/*
2978 * Retrieve the current firmware trace directly from smem.
2979 */
2980bfa_status_t
2981bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2982{
2983        u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2984        int tlen;
2985        bfa_status_t status;
2986
2987        bfa_trc(ioc, *trclen);
2988
2989        tlen = *trclen;
2990        if (tlen > BFA_DBG_FWTRC_LEN)
2991                tlen = BFA_DBG_FWTRC_LEN;
2992
2993        status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2994        *trclen = tlen;
2995        return status;
2996}
2997
2998static void
2999bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
3000{
3001        struct bfa_mbox_cmd_s cmd;
3002        struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
3003
3004        bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
3005                    bfa_ioc_portid(ioc));
3006        req->clscode = cpu_to_be16(ioc->clscode);
3007        bfa_ioc_mbox_queue(ioc, &cmd);
3008}
3009
3010static void
3011bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
3012{
3013        u32 fwsync_iter = 1000;
3014
3015        bfa_ioc_send_fwsync(ioc);
3016
3017        /*
3018         * After sending a fw sync mbox command wait for it to
3019         * take effect.  We will not wait for a response because
3020         *    1. fw_sync mbox cmd doesn't have a response.
3021         *    2. Even if we implement that,  interrupts might not
3022         *       be enabled when we call this function.
3023         * So, just keep checking if any mbox cmd is pending, and
3024         * after waiting for a reasonable amount of time, go ahead.
3025         * It is possible that fw has crashed and the mbox command
3026         * is never acknowledged.
3027         */
3028        while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
3029                fwsync_iter--;
3030}
3031
3032/*
3033 * Dump firmware smem
3034 */
3035bfa_status_t
3036bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
3037                                u32 *offset, int *buflen)
3038{
3039        u32 loff;
3040        int dlen;
3041        bfa_status_t status;
3042        u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
3043
3044        if (*offset >= smem_len) {
3045                *offset = *buflen = 0;
3046                return BFA_STATUS_EINVAL;
3047        }
3048
3049        loff = *offset;
3050        dlen = *buflen;
3051
3052        /*
3053         * On the first smem read, sync with firmware before proceeding;
3054         * no need to sync again before reading each subsequent chunk.
3055         */
3056        if (loff == 0)
3057                bfa_ioc_fwsync(ioc);
3058
3059        if ((loff + dlen) >= smem_len)
3060                dlen = smem_len - loff;
3061
3062        status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
3063
3064        if (status != BFA_STATUS_OK) {
3065                *offset = *buflen = 0;
3066                return status;
3067        }
3068
3069        *offset += dlen;
3070
3071        if (*offset >= smem_len)
3072                *offset = 0;
3073
3074        *buflen = dlen;
3075
3076        return status;
3077}
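
/*
 * Usage sketch: dump all of firmware smem in caller-sized chunks; the
 * routine advances *offset and resets it to zero once the end is
 * reached, so a caller may loop until the offset wraps:
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = sizeof(buf);
 *		if (bfa_ioc_debug_fwcore(ioc, buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		consume(buf, len);		hypothetical consumer
 *	} while (off != 0);
 */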
3078
3079/*
3080 * Firmware statistics
3081 */
3082bfa_status_t
3083bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
3084{
3085        u32 loff = BFI_IOC_FWSTATS_OFF +
3086                BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3087        int tlen;
3088        bfa_status_t status;
3089
3090        if (ioc->stats_busy) {
3091                bfa_trc(ioc, ioc->stats_busy);
3092                return BFA_STATUS_DEVBUSY;
3093        }
3094        ioc->stats_busy = BFA_TRUE;
3095
3096        tlen = sizeof(struct bfa_fw_stats_s);
3097        status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
3098
3099        ioc->stats_busy = BFA_FALSE;
3100        return status;
3101}
3102
3103bfa_status_t
3104bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
3105{
3106        u32 loff = BFI_IOC_FWSTATS_OFF +
3107                BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3108        int tlen;
3109        bfa_status_t status;
3110
3111        if (ioc->stats_busy) {
3112                bfa_trc(ioc, ioc->stats_busy);
3113                return BFA_STATUS_DEVBUSY;
3114        }
3115        ioc->stats_busy = BFA_TRUE;
3116
3117        tlen = sizeof(struct bfa_fw_stats_s);
3118        status = bfa_ioc_smem_clr(ioc, loff, tlen);
3119
3120        ioc->stats_busy = BFA_FALSE;
3121        return status;
3122}
3123
3124/*
3125 * Save firmware trace if configured.
3126 */
3127void
3128bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
3129{
3130        int             tlen;
3131
3132        if (ioc->dbg_fwsave_once) {
3133                ioc->dbg_fwsave_once = BFA_FALSE;
3134                if (ioc->dbg_fwsave_len) {
3135                        tlen = ioc->dbg_fwsave_len;
3136                        bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
3137                }
3138        }
3139}
3140
3141/*
3142 * Firmware failure detected. Start recovery actions.
3143 */
3144static void
3145bfa_ioc_recover(struct bfa_ioc_s *ioc)
3146{
3147        bfa_ioc_stats(ioc, ioc_hbfails);
3148        ioc->stats.hb_count = ioc->hb_count;
3149        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
3150}
3151
3152/*
3153 *  BFA IOC PF private functions
3154 */
3155static void
3156bfa_iocpf_timeout(void *ioc_arg)
3157{
3158        struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3159
3160        bfa_trc(ioc, 0);
3161        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3162}
3163
3164static void
3165bfa_iocpf_sem_timeout(void *ioc_arg)
3166{
3167        struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3168
3169        bfa_ioc_hw_sem_get(ioc);
3170}
3171
3172static void
3173bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
3174{
3175        u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3176
3177        bfa_trc(ioc, fwstate);
3178
3179        if (fwstate == BFI_IOC_DISABLED) {
3180                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3181                return;
3182        }
3183
3184        if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
3185                bfa_iocpf_timeout(ioc);
3186        else {
3187                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3188                bfa_iocpf_poll_timer_start(ioc);
3189        }
3190}
3191
3192static void
3193bfa_iocpf_poll_timeout(void *ioc_arg)
3194{
3195        struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3196
3197        bfa_ioc_poll_fwinit(ioc);
3198}
3199
3200/*
3201 *  bfa timer tick -- must be called by the driver every BFA_TIMER_FREQ msecs.
3202 */
3203void
3204bfa_timer_beat(struct bfa_timer_mod_s *mod)
3205{
3206        struct list_head *qh = &mod->timer_q;
3207        struct list_head *qe, *qe_next;
3208        struct bfa_timer_s *elem;
3209        struct list_head timedout_q;
3210
3211        INIT_LIST_HEAD(&timedout_q);
3212
3213        qe = bfa_q_next(qh);
3214
3215        while (qe != qh) {
3216                qe_next = bfa_q_next(qe);
3217
3218                elem = (struct bfa_timer_s *) qe;
3219                if (elem->timeout <= BFA_TIMER_FREQ) {
3220                        elem->timeout = 0;
3221                        list_del(&elem->qe);
3222                        list_add_tail(&elem->qe, &timedout_q);
3223                } else {
3224                        elem->timeout -= BFA_TIMER_FREQ;
3225                }
3226
3227                qe = qe_next;   /* go to next elem */
3228        }
3229
3230        /*
3231         * Pop all the timeout entries
3232         */
3233        while (!list_empty(&timedout_q)) {
3234                bfa_q_deq(&timedout_q, &elem);
3235                elem->timercb(elem->arg);
3236        }
3237}
3238
3239/*
3240 * Should be called with lock protection
3241 */
3242void
3243bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3244                    void (*timercb) (void *), void *arg, unsigned int timeout)
3245{
3247        WARN_ON(timercb == NULL);
3248        WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3249
3250        timer->timeout = timeout;
3251        timer->timercb = timercb;
3252        timer->arg = arg;
3253
3254        list_add_tail(&timer->qe, &mod->timer_q);
3255}
3256
3257/*
3258 * Should be called with lock protection
3259 */
3260void
3261bfa_timer_stop(struct bfa_timer_s *timer)
3262{
3263        WARN_ON(list_empty(&timer->qe));
3264
3265        list_del(&timer->qe);
3266}
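
/*
 * Usage sketch (my_timeout_cb, arg and timeout_ms are hypothetical):
 * arm a one-shot callback; bfa_timer_beat() must be driven every
 * BFA_TIMER_FREQ msecs for timeouts to fire:
 *
 *	bfa_timer_begin(mod, &timer, my_timeout_cb, arg, timeout_ms);
 *	...
 *	bfa_timer_stop(&timer);		cancel before it expires
 */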
3267
3268/*
3269 *      ASIC block related
3270 */
3271static void
3272bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3273{
3274        struct bfa_ablk_cfg_inst_s *cfg_inst;
3275        int i, j;
3276        u16     be16;
3277
3278        for (i = 0; i < BFA_ABLK_MAX; i++) {
3279                cfg_inst = &cfg->inst[i];
3280                for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3281                        be16 = cfg_inst->pf_cfg[j].pers;
3282                        cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3283                        be16 = cfg_inst->pf_cfg[j].num_qpairs;
3284                        cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3285                        be16 = cfg_inst->pf_cfg[j].num_vectors;
3286                        cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3287                        be16 = cfg_inst->pf_cfg[j].bw_min;
3288                        cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3289                        be16 = cfg_inst->pf_cfg[j].bw_max;
3290                        cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3291                }
3292        }
3293}
3294
3295static void
3296bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3297{
3298        struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3299        struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3300        bfa_ablk_cbfn_t cbfn;
3301
3302        WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3303        bfa_trc(ablk->ioc, msg->mh.msg_id);
3304
3305        switch (msg->mh.msg_id) {
3306        case BFI_ABLK_I2H_QUERY:
3307                if (rsp->status == BFA_STATUS_OK) {
3308                        memcpy(ablk->cfg, ablk->dma_addr.kva,
3309                                sizeof(struct bfa_ablk_cfg_s));
3310                        bfa_ablk_config_swap(ablk->cfg);
3311                        ablk->cfg = NULL;
3312                }
3313                break;
3314
3315        case BFI_ABLK_I2H_ADPT_CONFIG:
3316        case BFI_ABLK_I2H_PORT_CONFIG:
3317                /* update config port mode */
3318                ablk->ioc->port_mode_cfg = rsp->port_mode;
3319                /* fall through */
3320        case BFI_ABLK_I2H_PF_DELETE:
3321        case BFI_ABLK_I2H_PF_UPDATE:
3322        case BFI_ABLK_I2H_OPTROM_ENABLE:
3323        case BFI_ABLK_I2H_OPTROM_DISABLE:
3324                /* No-op */
3325                break;
3326
3327        case BFI_ABLK_I2H_PF_CREATE:
3328                *(ablk->pcifn) = rsp->pcifn;
3329                ablk->pcifn = NULL;
3330                break;
3331
3332        default:
3333                WARN_ON(1);
3334        }
3335
3336        ablk->busy = BFA_FALSE;
3337        if (ablk->cbfn) {
3338                cbfn = ablk->cbfn;
3339                ablk->cbfn = NULL;
3340                cbfn(ablk->cbarg, rsp->status);
3341        }
3342}
3343
3344static void
3345bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3346{
3347        struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3348
3349        bfa_trc(ablk->ioc, event);
3350
3351        switch (event) {
3352        case BFA_IOC_E_ENABLED:
3353                WARN_ON(ablk->busy != BFA_FALSE);
3354                break;
3355
3356        case BFA_IOC_E_DISABLED:
3357        case BFA_IOC_E_FAILED:
3358                /* Fail any pending requests */
3359                ablk->pcifn = NULL;
3360                if (ablk->busy) {
3361                        if (ablk->cbfn)
3362                                ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3363                        ablk->cbfn = NULL;
3364                        ablk->busy = BFA_FALSE;
3365                }
3366                break;
3367
3368        default:
3369                WARN_ON(1);
3370                break;
3371        }
3372}
3373
3374u32
3375bfa_ablk_meminfo(void)
3376{
3377        return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3378}
3379
3380void
3381bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3382{
3383        ablk->dma_addr.kva = dma_kva;
3384        ablk->dma_addr.pa  = dma_pa;
3385}
3386
3387void
3388bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3389{
3390        ablk->ioc = ioc;
3391
3392        bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3393        bfa_q_qe_init(&ablk->ioc_notify);
3394        bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3395        list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3396}
3397
3398bfa_status_t
3399bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3400                bfa_ablk_cbfn_t cbfn, void *cbarg)
3401{
3402        struct bfi_ablk_h2i_query_s *m;
3403
3404        WARN_ON(!ablk_cfg);
3405
3406        if (!bfa_ioc_is_operational(ablk->ioc)) {
3407                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3408                return BFA_STATUS_IOC_FAILURE;
3409        }
3410
3411        if (ablk->busy) {
3412                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3413                return  BFA_STATUS_DEVBUSY;
3414        }
3415
3416        ablk->cfg = ablk_cfg;
3417        ablk->cbfn  = cbfn;
3418        ablk->cbarg = cbarg;
3419        ablk->busy  = BFA_TRUE;
3420
3421        m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3422        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3423                    bfa_ioc_portid(ablk->ioc));
3424        bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3425        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3426
3427        return BFA_STATUS_OK;
3428}
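/*
 * Usage sketch (illustrative only; assumes a my_ablk_done() handler as
 * sketched above): the query is asynchronous, so 'ablk_cfg' is valid
 * only once the callback reports BFA_STATUS_OK.
 *
 *      if (bfa_ablk_query(ablk, &cfg, my_ablk_done, &ctx) == BFA_STATUS_OK)
 *              wait_for_completion(&ctx.comp);
 */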
3429
3430bfa_status_t
3431bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3432                u8 port, enum bfi_pcifn_class personality,
3433                u16 bw_min, u16 bw_max,
3434                bfa_ablk_cbfn_t cbfn, void *cbarg)
3435{
3436        struct bfi_ablk_h2i_pf_req_s *m;
3437
3438        if (!bfa_ioc_is_operational(ablk->ioc)) {
3439                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3440                return BFA_STATUS_IOC_FAILURE;
3441        }
3442
3443        if (ablk->busy) {
3444                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3445                return BFA_STATUS_DEVBUSY;
3446        }
3447
3448        ablk->pcifn = pcifn;
3449        ablk->cbfn = cbfn;
3450        ablk->cbarg = cbarg;
3451        ablk->busy  = BFA_TRUE;
3452
3453        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3454        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3455                    bfa_ioc_portid(ablk->ioc));
3456        m->pers = cpu_to_be16((u16)personality);
3457        m->bw_min = cpu_to_be16(bw_min);
3458        m->bw_max = cpu_to_be16(bw_max);
3459        m->port = port;
3460        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3461
3462        return BFA_STATUS_OK;
3463}
3464
3465bfa_status_t
3466bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3467                bfa_ablk_cbfn_t cbfn, void *cbarg)
3468{
3469        struct bfi_ablk_h2i_pf_req_s *m;
3470
3471        if (!bfa_ioc_is_operational(ablk->ioc)) {
3472                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3473                return BFA_STATUS_IOC_FAILURE;
3474        }
3475
3476        if (ablk->busy) {
3477                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3478                return BFA_STATUS_DEVBUSY;
3479        }
3480
3481        ablk->cbfn  = cbfn;
3482        ablk->cbarg = cbarg;
3483        ablk->busy  = BFA_TRUE;
3484
3485        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3486        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3487                    bfa_ioc_portid(ablk->ioc));
3488        m->pcifn = (u8)pcifn;
3489        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3490
3491        return BFA_STATUS_OK;
3492}
3493
3494bfa_status_t
3495bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3496                int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3497{
3498        struct bfi_ablk_h2i_cfg_req_s *m;
3499
3500        if (!bfa_ioc_is_operational(ablk->ioc)) {
3501                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3502                return BFA_STATUS_IOC_FAILURE;
3503        }
3504
3505        if (ablk->busy) {
3506                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3507                return BFA_STATUS_DEVBUSY;
3508        }
3509
3510        ablk->cbfn  = cbfn;
3511        ablk->cbarg = cbarg;
3512        ablk->busy  = BFA_TRUE;
3513
3514        m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3515        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3516                    bfa_ioc_portid(ablk->ioc));
3517        m->mode = (u8)mode;
3518        m->max_pf = (u8)max_pf;
3519        m->max_vf = (u8)max_vf;
3520        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3521
3522        return BFA_STATUS_OK;
3523}
3524
3525bfa_status_t
3526bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3527                int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3528{
3529        struct bfi_ablk_h2i_cfg_req_s *m;
3530
3531        if (!bfa_ioc_is_operational(ablk->ioc)) {
3532                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3533                return BFA_STATUS_IOC_FAILURE;
3534        }
3535
3536        if (ablk->busy) {
3537                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3538                return BFA_STATUS_DEVBUSY;
3539        }
3540
3541        ablk->cbfn  = cbfn;
3542        ablk->cbarg = cbarg;
3543        ablk->busy  = BFA_TRUE;
3544
3545        m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3546        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3547                bfa_ioc_portid(ablk->ioc));
3548        m->port = (u8)port;
3549        m->mode = (u8)mode;
3550        m->max_pf = (u8)max_pf;
3551        m->max_vf = (u8)max_vf;
3552        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3553
3554        return BFA_STATUS_OK;
3555}
3556
3557bfa_status_t
3558bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3559                   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3560{
3561        struct bfi_ablk_h2i_pf_req_s *m;
3562
3563        if (!bfa_ioc_is_operational(ablk->ioc)) {
3564                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3565                return BFA_STATUS_IOC_FAILURE;
3566        }
3567
3568        if (ablk->busy) {
3569                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3570                return BFA_STATUS_DEVBUSY;
3571        }
3572
3573        ablk->cbfn  = cbfn;
3574        ablk->cbarg = cbarg;
3575        ablk->busy  = BFA_TRUE;
3576
3577        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3578        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3579                bfa_ioc_portid(ablk->ioc));
3580        m->pcifn = (u8)pcifn;
3581        m->bw_min = cpu_to_be16(bw_min);
3582        m->bw_max = cpu_to_be16(bw_max);
3583        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3584
3585        return BFA_STATUS_OK;
3586}
3587
3588bfa_status_t
3589bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3590{
3591        struct bfi_ablk_h2i_optrom_s *m;
3592
3593        if (!bfa_ioc_is_operational(ablk->ioc)) {
3594                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3595                return BFA_STATUS_IOC_FAILURE;
3596        }
3597
3598        if (ablk->busy) {
3599                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3600                return BFA_STATUS_DEVBUSY;
3601        }
3602
3603        ablk->cbfn  = cbfn;
3604        ablk->cbarg = cbarg;
3605        ablk->busy  = BFA_TRUE;
3606
3607        m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3608        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3609                bfa_ioc_portid(ablk->ioc));
3610        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3611
3612        return BFA_STATUS_OK;
3613}
3614
3615bfa_status_t
3616bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3617{
3618        struct bfi_ablk_h2i_optrom_s *m;
3619
3620        if (!bfa_ioc_is_operational(ablk->ioc)) {
3621                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3622                return BFA_STATUS_IOC_FAILURE;
3623        }
3624
3625        if (ablk->busy) {
3626                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3627                return BFA_STATUS_DEVBUSY;
3628        }
3629
3630        ablk->cbfn  = cbfn;
3631        ablk->cbarg = cbarg;
3632        ablk->busy  = BFA_TRUE;
3633
3634        m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3635        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3636                bfa_ioc_portid(ablk->ioc));
3637        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3638
3639        return BFA_STATUS_OK;
3640}
3641
3642/*
3643 *      SFP module specific
3644 */
3645
3646/* forward declarations */
3647static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3648static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3649static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3650                                enum bfa_port_speed portspeed);
3651
3652static void
3653bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3654{
3655        bfa_trc(sfp, sfp->lock);
3656        if (sfp->cbfn)
3657                sfp->cbfn(sfp->cbarg, sfp->status);
3658        sfp->lock = 0;
3659        sfp->cbfn = NULL;
3660}
3661
3662static void
3663bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3664{
3665        bfa_trc(sfp, sfp->portspeed);
3666        if (sfp->media) {
3667                bfa_sfp_media_get(sfp);
3668                if (sfp->state_query_cbfn)
3669                        sfp->state_query_cbfn(sfp->state_query_cbarg,
3670                                        sfp->status);
3671                sfp->media = NULL;
3672        }
3673
3674        if (sfp->portspeed) {
3675                sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3676                if (sfp->state_query_cbfn)
3677                        sfp->state_query_cbfn(sfp->state_query_cbarg,
3678                                        sfp->status);
3679                sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3680        }
3681
3682        sfp->state_query_lock = 0;
3683        sfp->state_query_cbfn = NULL;
3684}
3685
3686/*
3687 *      IOC event handler.
3688 */
3689static void
3690bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3691{
3692        struct bfa_sfp_s *sfp = sfp_arg;
3693
3694        bfa_trc(sfp, event);
3695        bfa_trc(sfp, sfp->lock);
3696        bfa_trc(sfp, sfp->state_query_lock);
3697
3698        switch (event) {
3699        case BFA_IOC_E_DISABLED:
3700        case BFA_IOC_E_FAILED:
3701                if (sfp->lock) {
3702                        sfp->status = BFA_STATUS_IOC_FAILURE;
3703                        bfa_cb_sfp_show(sfp);
3704                }
3705
3706                if (sfp->state_query_lock) {
3707                        sfp->status = BFA_STATUS_IOC_FAILURE;
3708                        bfa_cb_sfp_state_query(sfp);
3709                }
3710                break;
3711
3712        default:
3713                break;
3714        }
3715}
3716
3717/*
3718 * Post an SFP State Change Notification to the AEN
3719 */
3720static void
3721bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3722{
3723        struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3724        struct bfa_aen_entry_s  *aen_entry;
3725        enum bfa_port_aen_event aen_evt = 0;
3726
3727        bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3728                      ((u64)rsp->event));
3729
3730        bfad_get_aen_entry(bfad, aen_entry);
3731        if (!aen_entry)
3732                return;
3733
3734        aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3735        aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3736        aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3737
3738        switch (rsp->event) {
3739        case BFA_SFP_SCN_INSERTED:
3740                aen_evt = BFA_PORT_AEN_SFP_INSERT;
3741                break;
3742        case BFA_SFP_SCN_REMOVED:
3743                aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3744                break;
3745        case BFA_SFP_SCN_FAILED:
3746                aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3747                break;
3748        case BFA_SFP_SCN_UNSUPPORT:
3749                aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3750                break;
3751        case BFA_SFP_SCN_POM:
3752                aen_evt = BFA_PORT_AEN_SFP_POM;
3753                aen_entry->aen_data.port.level = rsp->pomlvl;
3754                break;
3755        default:
3756                bfa_trc(sfp, rsp->event);
3757                WARN_ON(1);
3758        }
3759
3760        /* Send the AEN notification */
3761        bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3762                                  BFA_AEN_CAT_PORT, aen_evt);
3763}
3764
3765/*
3766 *      SFP get data send
3767 */
3768static void
3769bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3770{
3771        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3772
3773        bfa_trc(sfp, req->memtype);
3774
3775        /* build host command */
3776        bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3777                        bfa_ioc_portid(sfp->ioc));
3778
3779        /* send mbox cmd */
3780        bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3781}
3782
3783/*
3784 *      SFP is valid, read sfp data
3785 */
3786static void
3787bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3788{
3789        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3790
3791        WARN_ON(sfp->lock != 0);
3792        bfa_trc(sfp, sfp->state);
3793
3794        sfp->lock = 1;
3795        sfp->memtype = memtype;
3796        req->memtype = memtype;
3797
3798        /* Setup SG list */
3799        bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3800
3801        bfa_sfp_getdata_send(sfp);
3802}
3803
3804/*
3805 *      SFP scn handler
3806 */
3807static void
3808bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3809{
3810        struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3811
3812        switch (rsp->event) {
3813        case BFA_SFP_SCN_INSERTED:
3814                sfp->state = BFA_SFP_STATE_INSERTED;
3815                sfp->data_valid = 0;
3816                bfa_sfp_scn_aen_post(sfp, rsp);
3817                break;
3818        case BFA_SFP_SCN_REMOVED:
3819                sfp->state = BFA_SFP_STATE_REMOVED;
3820                sfp->data_valid = 0;
3821                bfa_sfp_scn_aen_post(sfp, rsp);
3822                break;
3823        case BFA_SFP_SCN_FAILED:
3824                sfp->state = BFA_SFP_STATE_FAILED;
3825                sfp->data_valid = 0;
3826                bfa_sfp_scn_aen_post(sfp, rsp);
3827                break;
3828        case BFA_SFP_SCN_UNSUPPORT:
3829                sfp->state = BFA_SFP_STATE_UNSUPPORT;
3830                bfa_sfp_scn_aen_post(sfp, rsp);
3831                if (!sfp->lock)
3832                        bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3833                break;
3834        case BFA_SFP_SCN_POM:
3835                bfa_sfp_scn_aen_post(sfp, rsp);
3836                break;
3837        case BFA_SFP_SCN_VALID:
3838                sfp->state = BFA_SFP_STATE_VALID;
3839                if (!sfp->lock)
3840                        bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3841                break;
3842        default:
3843                bfa_trc(sfp, rsp->event);
3844                WARN_ON(1);
3845        }
3846}
3847
3848/*
3849 * SFP show complete
3850 */
3851static void
3852bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3853{
3854        struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3855
3856        if (!sfp->lock) {
3857                /*
3858                 * receiving response after ioc failure
3859                 */
3860                bfa_trc(sfp, sfp->lock);
3861                return;
3862        }
3863
3864        bfa_trc(sfp, rsp->status);
3865        if (rsp->status == BFA_STATUS_OK) {
3866                sfp->data_valid = 1;
3867                if (sfp->state == BFA_SFP_STATE_VALID)
3868                        sfp->status = BFA_STATUS_OK;
3869                else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3870                        sfp->status = BFA_STATUS_SFP_UNSUPP;
3871                else
3872                        bfa_trc(sfp, sfp->state);
3873        } else {
3874                sfp->data_valid = 0;
3875                sfp->status = rsp->status;
3876                /* sfpshow shouldn't change sfp state */
3877        }
3878
3879        bfa_trc(sfp, sfp->memtype);
3880        if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3881                bfa_trc(sfp, sfp->data_valid);
3882                if (sfp->data_valid) {
3883                        u32     size = sizeof(struct sfp_mem_s);
3884                        u8 *des = (u8 *)(sfp->sfpmem);
3885                        memcpy(des, sfp->dbuf_kva, size);
3886                }
3887                /*
3888                 * Queue completion callback.
3889                 */
3890                bfa_cb_sfp_show(sfp);
3891        } else
3892                sfp->lock = 0;
3893
3894        bfa_trc(sfp, sfp->state_query_lock);
3895        if (sfp->state_query_lock) {
3896                sfp->state = rsp->state;
3897                /* Complete callback */
3898                bfa_cb_sfp_state_query(sfp);
3899        }
3900}
3901
3902/*
3903 *      SFP query fw sfp state
3904 */
3905static void
3906bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3907{
3908        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3909
3910        /* Should not be doing query if not in _INIT state */
3911        WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3912        WARN_ON(sfp->state_query_lock != 0);
3913        bfa_trc(sfp, sfp->state);
3914
3915        sfp->state_query_lock = 1;
3916        req->memtype = 0;
3917
3918        if (!sfp->lock)
3919                bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3920}
3921
3922static void
3923bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3924{
3925        enum bfa_defs_sfp_media_e *media = sfp->media;
3926
3927        *media = BFA_SFP_MEDIA_UNKNOWN;
3928
3929        if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3930                *media = BFA_SFP_MEDIA_UNSUPPORT;
3931        else if (sfp->state == BFA_SFP_STATE_VALID) {
3932                union sfp_xcvr_e10g_code_u e10g;
3933                struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3934                u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3935                                (sfpmem->srlid_base.xcvr[5] >> 1);
3936
3937                e10g.b = sfpmem->srlid_base.xcvr[0];
3938                bfa_trc(sfp, e10g.b);
3939                bfa_trc(sfp, xmtr_tech);
3940                /* check fc transmitter tech */
3941                if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3942                    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3943                    (xmtr_tech & SFP_XMTR_TECH_CA))
3944                        *media = BFA_SFP_MEDIA_CU;
3945                else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3946                         (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3947                        *media = BFA_SFP_MEDIA_EL;
3948                else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3949                         (xmtr_tech & SFP_XMTR_TECH_LC))
3950                        *media = BFA_SFP_MEDIA_LW;
3951                else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3952                         (xmtr_tech & SFP_XMTR_TECH_SN) ||
3953                         (xmtr_tech & SFP_XMTR_TECH_SA))
3954                        *media = BFA_SFP_MEDIA_SW;
3955                /* Check 10G Ethernet Compliance code */
3956                else if (e10g.r.e10g_sr)
3957                        *media = BFA_SFP_MEDIA_SW;
3958                else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3959                        *media = BFA_SFP_MEDIA_LW;
3960                else if (e10g.r.e10g_unall)
3961                        *media = BFA_SFP_MEDIA_UNKNOWN;
3962                else
3963                        bfa_trc(sfp, 0);
3964        } else
3965                bfa_trc(sfp, sfp->state);
3966}
3967
3968static bfa_status_t
3969bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3970{
3971        struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3972        struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3973        union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3974        union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3975
3976        if (portspeed == BFA_PORT_SPEED_10GBPS) {
3977                if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3978                        return BFA_STATUS_OK;
3979                else {
3980                        bfa_trc(sfp, e10g.b);
3981                        return BFA_STATUS_UNSUPP_SPEED;
3982                }
3983        }
3984        if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3985            ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3986            ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3987            ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3988            ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3989                return BFA_STATUS_OK;
3990        else {
3991                bfa_trc(sfp, portspeed);
3992                bfa_trc(sfp, fc3.b);
3993                bfa_trc(sfp, e10g.b);
3994                return BFA_STATUS_UNSUPP_SPEED;
3995        }
3996}
3997
3998/*
3999 *      SFP hmbox handler
4000 */
4001void
4002bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
4003{
4004        struct bfa_sfp_s *sfp = sfparg;
4005
4006        switch (msg->mh.msg_id) {
4007        case BFI_SFP_I2H_SHOW:
4008                bfa_sfp_show_comp(sfp, msg);
4009                break;
4010
4011        case BFI_SFP_I2H_SCN:
4012                bfa_sfp_scn(sfp, msg);
4013                break;
4014
4015        default:
4016                bfa_trc(sfp, msg->mh.msg_id);
4017                WARN_ON(1);
4018        }
4019}
4020
4021/*
4022 *      Return DMA memory needed by sfp module.
4023 */
4024u32
4025bfa_sfp_meminfo(void)
4026{
4027        return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4028}
4029
4030/*
4031 *      Attach the SFP module: init state and register mbox/IOC handlers.
4032 */
4033void
4034bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
4035                struct bfa_trc_mod_s *trcmod)
4036{
4037        sfp->dev = dev;
4038        sfp->ioc = ioc;
4039        sfp->trcmod = trcmod;
4040
4041        sfp->cbfn = NULL;
4042        sfp->cbarg = NULL;
4043        sfp->sfpmem = NULL;
4044        sfp->lock = 0;
4045        sfp->data_valid = 0;
4046        sfp->state = BFA_SFP_STATE_INIT;
4047        sfp->state_query_lock = 0;
4048        sfp->state_query_cbfn = NULL;
4049        sfp->state_query_cbarg = NULL;
4050        sfp->media = NULL;
4051        sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
4052        sfp->is_elb = BFA_FALSE;
4053
4054        bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
4055        bfa_q_qe_init(&sfp->ioc_notify);
4056        bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
4057        list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
4058}
4059
4060/*
4061 *      Claim Memory for SFP
4062 */
4063void
4064bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
4065{
4066        sfp->dbuf_kva   = dm_kva;
4067        sfp->dbuf_pa    = dm_pa;
4068        memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
4069
4070        dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4071        dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4072}
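/*
 * The meminfo/memclaim pair follows the usual two-step contract: size
 * the DMA area first, then hand the carved-out chunk back.  A minimal
 * sketch, assuming a caller that owns a DMA-capable 'dev' (names are
 * illustrative):
 *
 *      u32 len = bfa_sfp_meminfo();
 *      dma_addr_t pa;
 *      u8 *kva = dma_alloc_coherent(dev, len, &pa, GFP_KERNEL);
 *
 *      if (kva)
 *              bfa_sfp_memclaim(sfp, kva, pa);
 */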
4073
4074/*
4075 * Show SFP eeprom content
4076 *
4077 * @param[in] sfp   - bfa sfp module
4078 *
4079 * @param[out] sfpmem - sfp eeprom data
4080 *
4081 */
4082bfa_status_t
4083bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
4084                bfa_cb_sfp_t cbfn, void *cbarg)
4085{
4087        if (!bfa_ioc_is_operational(sfp->ioc)) {
4088                bfa_trc(sfp, 0);
4089                return BFA_STATUS_IOC_NON_OP;
4090        }
4091
4092        if (sfp->lock) {
4093                bfa_trc(sfp, 0);
4094                return BFA_STATUS_DEVBUSY;
4095        }
4096
4097        sfp->cbfn = cbfn;
4098        sfp->cbarg = cbarg;
4099        sfp->sfpmem = sfpmem;
4100
4101        bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
4102        return BFA_STATUS_OK;
4103}
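/*
 * Illustrative call sequence (caller names are hypothetical): the
 * EEPROM contents land in 'sfpmem' only after the callback reports
 * BFA_STATUS_OK; BFA_STATUS_DEVBUSY means a previous show/query still
 * holds sfp->lock and the request should be retried later.
 *
 *      status = bfa_sfp_show(sfp, &sfpmem, my_sfp_done, &ctx);
 *      if (status == BFA_STATUS_OK)
 *              wait_for_completion(&ctx.comp);
 */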
4104
4105/*
4106 * Return SFP Media type
4107 *
4108 * @param[in] sfp   - bfa sfp module
4109 *
4110 * @param[out] media - SFP media type detected on the port
4111 *
4112 */
4113bfa_status_t
4114bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
4115                bfa_cb_sfp_t cbfn, void *cbarg)
4116{
4117        if (!bfa_ioc_is_operational(sfp->ioc)) {
4118                bfa_trc(sfp, 0);
4119                return BFA_STATUS_IOC_NON_OP;
4120        }
4121
4122        sfp->media = media;
4123        if (sfp->state == BFA_SFP_STATE_INIT) {
4124                if (sfp->state_query_lock) {
4125                        bfa_trc(sfp, 0);
4126                        return BFA_STATUS_DEVBUSY;
4127                } else {
4128                        sfp->state_query_cbfn = cbfn;
4129                        sfp->state_query_cbarg = cbarg;
4130                        bfa_sfp_state_query(sfp);
4131                        return BFA_STATUS_SFP_NOT_READY;
4132                }
4133        }
4134
4135        bfa_sfp_media_get(sfp);
4136        return BFA_STATUS_OK;
4137}
4138
4139/*
4140 * Check if the user-set port speed is allowed by the SFP
4141 *
4142 * @param[in] sfp   - bfa sfp module
4143 * @param[in] portspeed - port speed from user
4144 *
4145 */
4146bfa_status_t
4147bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
4148                bfa_cb_sfp_t cbfn, void *cbarg)
4149{
4150        WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
4151
4152        if (!bfa_ioc_is_operational(sfp->ioc))
4153                return BFA_STATUS_IOC_NON_OP;
4154
4155        /* For Mezz cards, all speeds are allowed */
4156        if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
4157                return BFA_STATUS_OK;
4158
4159        /* Check SFP state */
4160        sfp->portspeed = portspeed;
4161        if (sfp->state == BFA_SFP_STATE_INIT) {
4162                if (sfp->state_query_lock) {
4163                        bfa_trc(sfp, 0);
4164                        return BFA_STATUS_DEVBUSY;
4165                } else {
4166                        sfp->state_query_cbfn = cbfn;
4167                        sfp->state_query_cbarg = cbarg;
4168                        bfa_sfp_state_query(sfp);
4169                        return BFA_STATUS_SFP_NOT_READY;
4170                }
4171        }
4172
4173        if (sfp->state == BFA_SFP_STATE_REMOVED ||
4174            sfp->state == BFA_SFP_STATE_FAILED) {
4175                bfa_trc(sfp, sfp->state);
4176                return BFA_STATUS_NO_SFP_DEV;
4177        }
4178
4179        if (sfp->state == BFA_SFP_STATE_INSERTED) {
4180                bfa_trc(sfp, sfp->state);
4181                return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
4182        }
4183
4184        /* For eloopback, all speeds are allowed */
4185        if (sfp->is_elb)
4186                return BFA_STATUS_OK;
4187
4188        return bfa_sfp_speed_valid(sfp, portspeed);
4189}
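/*
 * Note the three-way outcome above: BFA_STATUS_OK (speed allowed now),
 * a hard error, or BFA_STATUS_SFP_NOT_READY, which means a firmware
 * state query was started and the real verdict arrives through 'cbfn'.
 * A hypothetical caller therefore handles it roughly as:
 *
 *      status = bfa_sfp_speed(sfp, speed, my_speed_done, &ctx);
 *      if (status == BFA_STATUS_SFP_NOT_READY)
 *              return;
 */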
4190
4191/*
4192 *      Flash module specific
4193 */
4194
4195/*
4196 * The FLASH DMA buffer must be big enough to hold both the MFG block and
4197 * the ASIC block (64K) at the same time, and must be 2K aligned so that
4198 * no write segment crosses a sector boundary.
4199 */
4200#define BFA_FLASH_SEG_SZ        2048
4201#define BFA_FLASH_DMA_BUF_SZ    \
4202        BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
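/*
 * Worked example of the sizing above (the 256-byte MFG block size is
 * only an assumption for illustration): 0x10000 + 256 = 0x10100 bytes
 * rounds up to 33 * 2048 = 0x10800, so the buffer always holds a whole
 * number of 2K write segments.
 */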
4203
4204static void
4205bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4206                        int inst, int type)
4207{
4208        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4209        struct bfa_aen_entry_s  *aen_entry;
4210
4211        bfad_get_aen_entry(bfad, aen_entry);
4212        if (!aen_entry)
4213                return;
4214
4215        aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4216        aen_entry->aen_data.audit.partition_inst = inst;
4217        aen_entry->aen_data.audit.partition_type = type;
4218
4219        /* Send the AEN notification */
4220        bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4221                                  BFA_AEN_CAT_AUDIT, event);
4222}
4223
4224static void
4225bfa_flash_cb(struct bfa_flash_s *flash)
4226{
4227        flash->op_busy = 0;
4228        if (flash->cbfn)
4229                flash->cbfn(flash->cbarg, flash->status);
4230}
4231
4232static void
4233bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4234{
4235        struct bfa_flash_s      *flash = cbarg;
4236
4237        bfa_trc(flash, event);
4238        switch (event) {
4239        case BFA_IOC_E_DISABLED:
4240        case BFA_IOC_E_FAILED:
4241                if (flash->op_busy) {
4242                        flash->status = BFA_STATUS_IOC_FAILURE;
4243                        flash->cbfn(flash->cbarg, flash->status);
4244                        flash->op_busy = 0;
4245                }
4246                break;
4247
4248        default:
4249                break;
4250        }
4251}
4252
4253/*
4254 * Send flash attribute query request.
4255 *
4256 * @param[in] cbarg - callback argument
4257 */
4258static void
4259bfa_flash_query_send(void *cbarg)
4260{
4261        struct bfa_flash_s *flash = cbarg;
4262        struct bfi_flash_query_req_s *msg =
4263                        (struct bfi_flash_query_req_s *) flash->mb.msg;
4264
4265        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4266                bfa_ioc_portid(flash->ioc));
4267        bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4268                flash->dbuf_pa);
4269        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4270}
4271
4272/*
4273 * Send flash write request.
4274 *
4275 * @param[in] flash - flash structure
4276 */
4277static void
4278bfa_flash_write_send(struct bfa_flash_s *flash)
4279{
4280        struct bfi_flash_write_req_s *msg =
4281                        (struct bfi_flash_write_req_s *) flash->mb.msg;
4282        u32     len;
4283
4284        msg->type = be32_to_cpu(flash->type);
4285        msg->instance = flash->instance;
4286        msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4287        len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4288                flash->residue : BFA_FLASH_DMA_BUF_SZ;
4289        msg->length = be32_to_cpu(len);
4290
4291        /* indicate if it's the last msg of the whole write operation */
4292        msg->last = (len == flash->residue) ? 1 : 0;
4293
4294        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4295                        bfa_ioc_portid(flash->ioc));
4296        bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4297        memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4298        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4299
4300        flash->residue -= len;
4301        flash->offset += len;
4302}
4303
4304/*
4305 * Send flash read request.
4306 *
4307 * @param[in] cbarg - callback argument
4308 */
4309static void
4310bfa_flash_read_send(void *cbarg)
4311{
4312        struct bfa_flash_s *flash = cbarg;
4313        struct bfi_flash_read_req_s *msg =
4314                        (struct bfi_flash_read_req_s *) flash->mb.msg;
4315        u32     len;
4316
4317        msg->type = be32_to_cpu(flash->type);
4318        msg->instance = flash->instance;
4319        msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4320        len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4321                        flash->residue : BFA_FLASH_DMA_BUF_SZ;
4322        msg->length = be32_to_cpu(len);
4323        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4324                bfa_ioc_portid(flash->ioc));
4325        bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4326        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4327}
4328
4329/*
4330 * Send flash erase request.
4331 *
4332 * @param[in] cbarg - callback argument
4333 */
4334static void
4335bfa_flash_erase_send(void *cbarg)
4336{
4337        struct bfa_flash_s *flash = cbarg;
4338        struct bfi_flash_erase_req_s *msg =
4339                        (struct bfi_flash_erase_req_s *) flash->mb.msg;
4340
4341        msg->type = be32_to_cpu(flash->type);
4342        msg->instance = flash->instance;
4343        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4344                        bfa_ioc_portid(flash->ioc));
4345        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4346}
4347
4348/*
4349 * Process flash response messages upon receiving interrupts.
4350 *
4351 * @param[in] flasharg - flash structure
4352 * @param[in] msg - message structure
4353 */
4354static void
4355bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4356{
4357        struct bfa_flash_s *flash = flasharg;
4358        u32     status;
4359
4360        union {
4361                struct bfi_flash_query_rsp_s *query;
4362                struct bfi_flash_erase_rsp_s *erase;
4363                struct bfi_flash_write_rsp_s *write;
4364                struct bfi_flash_read_rsp_s *read;
4365                struct bfi_flash_event_s *event;
4366                struct bfi_mbmsg_s   *msg;
4367        } m;
4368
4369        m.msg = msg;
4370        bfa_trc(flash, msg->mh.msg_id);
4371
4372        if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4373                /* receiving response after ioc failure */
4374                bfa_trc(flash, 0x9999);
4375                return;
4376        }
4377
4378        switch (msg->mh.msg_id) {
4379        case BFI_FLASH_I2H_QUERY_RSP:
4380                status = be32_to_cpu(m.query->status);
4381                bfa_trc(flash, status);
4382                if (status == BFA_STATUS_OK) {
4383                        u32     i;
4384                        struct bfa_flash_attr_s *attr, *f;
4385
4386                        attr = (struct bfa_flash_attr_s *) flash->ubuf;
4387                        f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4388                        attr->status = be32_to_cpu(f->status);
4389                        attr->npart = be32_to_cpu(f->npart);
4390                        bfa_trc(flash, attr->status);
4391                        bfa_trc(flash, attr->npart);
4392                        for (i = 0; i < attr->npart; i++) {
4393                                attr->part[i].part_type =
4394                                        be32_to_cpu(f->part[i].part_type);
4395                                attr->part[i].part_instance =
4396                                        be32_to_cpu(f->part[i].part_instance);
4397                                attr->part[i].part_off =
4398                                        be32_to_cpu(f->part[i].part_off);
4399                                attr->part[i].part_size =
4400                                        be32_to_cpu(f->part[i].part_size);
4401                                attr->part[i].part_len =
4402                                        be32_to_cpu(f->part[i].part_len);
4403                                attr->part[i].part_status =
4404                                        be32_to_cpu(f->part[i].part_status);
4405                        }
4406                }
4407                flash->status = status;
4408                bfa_flash_cb(flash);
4409                break;
4410        case BFI_FLASH_I2H_ERASE_RSP:
4411                status = be32_to_cpu(m.erase->status);
4412                bfa_trc(flash, status);
4413                flash->status = status;
4414                bfa_flash_cb(flash);
4415                break;
4416        case BFI_FLASH_I2H_WRITE_RSP:
4417                status = be32_to_cpu(m.write->status);
4418                bfa_trc(flash, status);
4419                if (status != BFA_STATUS_OK || flash->residue == 0) {
4420                        flash->status = status;
4421                        bfa_flash_cb(flash);
4422                } else {
4423                        bfa_trc(flash, flash->offset);
4424                        bfa_flash_write_send(flash);
4425                }
4426                break;
4427        case BFI_FLASH_I2H_READ_RSP:
4428                status = be32_to_cpu(m.read->status);
4429                bfa_trc(flash, status);
4430                if (status != BFA_STATUS_OK) {
4431                        flash->status = status;
4432                        bfa_flash_cb(flash);
4433                } else {
4434                        u32 len = be32_to_cpu(m.read->length);
4435                        bfa_trc(flash, flash->offset);
4436                        bfa_trc(flash, len);
4437                        memcpy(flash->ubuf + flash->offset,
4438                                flash->dbuf_kva, len);
4439                        flash->residue -= len;
4440                        flash->offset += len;
4441                        if (flash->residue == 0) {
4442                                flash->status = status;
4443                                bfa_flash_cb(flash);
4444                        } else
4445                                bfa_flash_read_send(flash);
4446                }
4447                break;
4448        case BFI_FLASH_I2H_BOOT_VER_RSP:
4449                break;
4450        case BFI_FLASH_I2H_EVENT:
4451                status = be32_to_cpu(m.event->status);
4452                bfa_trc(flash, status);
4453                if (status == BFA_STATUS_BAD_FWCFG)
4454                        bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4455                else if (status == BFA_STATUS_INVALID_VENDOR) {
4456                        u32 param;
4457                        param = be32_to_cpu(m.event->param);
4458                        bfa_trc(flash, param);
4459                        bfa_ioc_aen_post(flash->ioc,
4460                                BFA_IOC_AEN_INVALID_VENDOR);
4461                }
4462                break;
4463
4464        default:
4465                WARN_ON(1);
4466        }
4467}
4468
4469/*
4470 * Flash memory info API.
4471 *
4472 * @param[in] mincfg - minimal cfg variable
4473 */
4474u32
4475bfa_flash_meminfo(bfa_boolean_t mincfg)
4476{
4477        /* min driver doesn't need flash */
4478        if (mincfg)
4479                return 0;
4480        return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4481}
4482
4483/*
4484 * Flash attach API.
4485 *
4486 * @param[in] flash - flash structure
4487 * @param[in] ioc  - ioc structure
4488 * @param[in] dev  - device structure
4489 * @param[in] trcmod - trace module
4490 * @param[in] logmod - log module
4491 */
4492void
4493bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4494                struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4495{
4496        flash->ioc = ioc;
4497        flash->trcmod = trcmod;
4498        flash->cbfn = NULL;
4499        flash->cbarg = NULL;
4500        flash->op_busy = 0;
4501
4502        bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4503        bfa_q_qe_init(&flash->ioc_notify);
4504        bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4505        list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4506
4507        /* min driver doesn't need flash */
4508        if (mincfg) {
4509                flash->dbuf_kva = NULL;
4510                flash->dbuf_pa = 0;
4511        }
4512}
4513
4514/*
4515 * Claim memory for flash
4516 *
4517 * @param[in] flash - flash structure
4518 * @param[in] dm_kva - pointer to virtual memory address
4519 * @param[in] dm_pa - physical memory address
4520 * @param[in] mincfg - minimal cfg variable
4521 */
4522void
4523bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4524                bfa_boolean_t mincfg)
4525{
4526        if (mincfg)
4527                return;
4528
4529        flash->dbuf_kva = dm_kva;
4530        flash->dbuf_pa = dm_pa;
4531        memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4532        dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4533        dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4534}
4535
4536/*
4537 * Get flash attribute.
4538 *
4539 * @param[in] flash - flash structure
4540 * @param[in] attr - flash attribute structure
4541 * @param[in] cbfn - callback function
4542 * @param[in] cbarg - callback argument
4543 *
4544 * Return status.
4545 */
4546bfa_status_t
4547bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4548                bfa_cb_flash_t cbfn, void *cbarg)
4549{
4550        bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4551
4552        if (!bfa_ioc_is_operational(flash->ioc))
4553                return BFA_STATUS_IOC_NON_OP;
4554
4555        if (flash->op_busy) {
4556                bfa_trc(flash, flash->op_busy);
4557                return BFA_STATUS_DEVBUSY;
4558        }
4559
4560        flash->op_busy = 1;
4561        flash->cbfn = cbfn;
4562        flash->cbarg = cbarg;
4563        flash->ubuf = (u8 *) attr;
4564        bfa_flash_query_send(flash);
4565
4566        return BFA_STATUS_OK;
4567}
4568
4569/*
4570 * Erase flash partition.
4571 *
4572 * @param[in] flash - flash structure
4573 * @param[in] type - flash partition type
4574 * @param[in] instance - flash partition instance
4575 * @param[in] cbfn - callback function
4576 * @param[in] cbarg - callback argument
4577 *
4578 * Return status.
4579 */
4580bfa_status_t
4581bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4582                u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4583{
4584        bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4585        bfa_trc(flash, type);
4586        bfa_trc(flash, instance);
4587
4588        if (!bfa_ioc_is_operational(flash->ioc))
4589                return BFA_STATUS_IOC_NON_OP;
4590
4591        if (flash->op_busy) {
4592                bfa_trc(flash, flash->op_busy);
4593                return BFA_STATUS_DEVBUSY;
4594        }
4595
4596        flash->op_busy = 1;
4597        flash->cbfn = cbfn;
4598        flash->cbarg = cbarg;
4599        flash->type = type;
4600        flash->instance = instance;
4601
4602        bfa_flash_erase_send(flash);
4603        bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4604                                instance, type);
4605        return BFA_STATUS_OK;
4606}
4607
4608/*
4609 * Update flash partition.
4610 *
4611 * @param[in] flash - flash structure
4612 * @param[in] type - flash partition type
4613 * @param[in] instance - flash partition instance
4614 * @param[in] buf - update data buffer
4615 * @param[in] len - data buffer length
4616 * @param[in] offset - offset relative to the partition starting address
4617 * @param[in] cbfn - callback function
4618 * @param[in] cbarg - callback argument
4619 *
4620 * Return status.
4621 */
4622bfa_status_t
4623bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4624                u8 instance, void *buf, u32 len, u32 offset,
4625                bfa_cb_flash_t cbfn, void *cbarg)
4626{
4627        bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4628        bfa_trc(flash, type);
4629        bfa_trc(flash, instance);
4630        bfa_trc(flash, len);
4631        bfa_trc(flash, offset);
4632
4633        if (!bfa_ioc_is_operational(flash->ioc))
4634                return BFA_STATUS_IOC_NON_OP;
4635
4636        /*
4637         * 'len' must be on a word (4-byte) boundary
4638         * 'offset' must be on a sector (16KB) boundary
4639         */
4640        if (!len || (len & 0x03) || (offset & 0x00003FFF))
4641                return BFA_STATUS_FLASH_BAD_LEN;
4642
4643        if (type == BFA_FLASH_PART_MFG)
4644                return BFA_STATUS_EINVAL;
4645
4646        if (flash->op_busy) {
4647                bfa_trc(flash, flash->op_busy);
4648                return BFA_STATUS_DEVBUSY;
4649        }
4650
4651        flash->op_busy = 1;
4652        flash->cbfn = cbfn;
4653        flash->cbarg = cbarg;
4654        flash->type = type;
4655        flash->instance = instance;
4656        flash->residue = len;
4657        flash->offset = 0;
4658        flash->addr_off = offset;
4659        flash->ubuf = buf;
4660
4661        bfa_flash_write_send(flash);
4662        return BFA_STATUS_OK;
4663}
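/*
 * The write is chunked transparently: bfa_flash_write_send() moves at
 * most BFA_FLASH_DMA_BUF_SZ bytes per mailbox exchange, and the
 * interrupt handler re-sends until 'residue' reaches zero.  Sketch of a
 * hypothetical caller updating a firmware partition:
 *
 *      status = bfa_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *                                     img, img_len, 0, my_flash_done, &ctx);
 */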
4664
4665/*
4666 * Read flash partition.
4667 *
4668 * @param[in] flash - flash structure
4669 * @param[in] type - flash partition type
4670 * @param[in] instance - flash partition instance
4671 * @param[in] buf - read data buffer
4672 * @param[in] len - data buffer length
4673 * @param[in] offset - offset relative to the partition starting address
4674 * @param[in] cbfn - callback function
4675 * @param[in] cbarg - callback argument
4676 *
4677 * Return status.
4678 */
4679bfa_status_t
4680bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4681                u8 instance, void *buf, u32 len, u32 offset,
4682                bfa_cb_flash_t cbfn, void *cbarg)
4683{
4684        bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4685        bfa_trc(flash, type);
4686        bfa_trc(flash, instance);
4687        bfa_trc(flash, len);
4688        bfa_trc(flash, offset);
4689
4690        if (!bfa_ioc_is_operational(flash->ioc))
4691                return BFA_STATUS_IOC_NON_OP;
4692
4693        /*
4694         * 'len' must be on a word (4-byte) boundary
4695         * 'offset' must be on a sector (16KB) boundary
4696         */
4697        if (!len || (len & 0x03) || (offset & 0x00003FFF))
4698                return BFA_STATUS_FLASH_BAD_LEN;
4699
4700        if (flash->op_busy) {
4701                bfa_trc(flash, flash->op_busy);
4702                return BFA_STATUS_DEVBUSY;
4703        }
4704
4705        flash->op_busy = 1;
4706        flash->cbfn = cbfn;
4707        flash->cbarg = cbarg;
4708        flash->type = type;
4709        flash->instance = instance;
4710        flash->residue = len;
4711        flash->offset = 0;
4712        flash->addr_off = offset;
4713        flash->ubuf = buf;
4714        bfa_flash_read_send(flash);
4715
4716        return BFA_STATUS_OK;
4717}
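/*
 * Reads mirror the write path: data is staged through the same DMA
 * buffer in BFA_FLASH_DMA_BUF_SZ chunks and copied into 'buf' as each
 * response arrives.  Illustrative use (names hypothetical):
 *
 *      status = bfa_flash_read_part(flash, BFA_FLASH_PART_BOOT, 0,
 *                                   buf, len, 0, my_flash_done, &ctx);
 */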
4718
4719/*
4720 *      DIAG module specific
4721 */
4722
4723#define BFA_DIAG_MEMTEST_TOV    50000   /* memtest timeout in msec */
4724#define CT2_BFA_DIAG_MEMTEST_TOV        (9*30*1000)  /* 4.5 min */
4725
4726/* IOC event handler */
4727static void
4728bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4729{
4730        struct bfa_diag_s *diag = diag_arg;
4731
4732        bfa_trc(diag, event);
4733        bfa_trc(diag, diag->block);
4734        bfa_trc(diag, diag->fwping.lock);
4735        bfa_trc(diag, diag->tsensor.lock);
4736
4737        switch (event) {
4738        case BFA_IOC_E_DISABLED:
4739        case BFA_IOC_E_FAILED:
4740                if (diag->fwping.lock) {
4741                        diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4742                        diag->fwping.cbfn(diag->fwping.cbarg,
4743                                        diag->fwping.status);
4744                        diag->fwping.lock = 0;
4745                }
4746
4747                if (diag->tsensor.lock) {
4748                        diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4749                        diag->tsensor.cbfn(diag->tsensor.cbarg,
4750                                           diag->tsensor.status);
4751                        diag->tsensor.lock = 0;
4752                }
4753
4754                if (diag->block) {
4755                        if (diag->timer_active) {
4756                                bfa_timer_stop(&diag->timer);
4757                                diag->timer_active = 0;
4758                        }
4759
4760                        diag->status = BFA_STATUS_IOC_FAILURE;
4761                        diag->cbfn(diag->cbarg, diag->status);
4762                        diag->block = 0;
4763                }
4764                break;
4765
4766        default:
4767                break;
4768        }
4769}
4770
4771static void
4772bfa_diag_memtest_done(void *cbarg)
4773{
4774        struct bfa_diag_s *diag = cbarg;
4775        struct bfa_ioc_s  *ioc = diag->ioc;
4776        struct bfa_diag_memtest_result *res = diag->result;
4777        u32     loff = BFI_BOOT_MEMTEST_RES_ADDR;
4778        u32     pgnum, pgoff, i;
4779
4780        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4781        pgoff = PSS_SMEM_PGOFF(loff);
4782
4783        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4784
4785        for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4786                         sizeof(u32)); i++) {
4787                /* read test result from smem */
4788                *((u32 *) res + i) =
4789                        bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4790                loff += sizeof(u32);
4791        }
4792
4793        /* Reset IOC fwstates to BFI_IOC_UNINIT */
4794        bfa_ioc_reset_fwstate(ioc);
4795
4796        res->status = swab32(res->status);
4797        bfa_trc(diag, res->status);
4798
4799        if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4800                diag->status = BFA_STATUS_OK;
4801        else {
4802                diag->status = BFA_STATUS_MEMTEST_FAILED;
4803                res->addr = swab32(res->addr);
4804                res->exp = swab32(res->exp);
4805                res->act = swab32(res->act);
4806                res->err_status = swab32(res->err_status);
4807                res->err_status1 = swab32(res->err_status1);
4808                res->err_addr = swab32(res->err_addr);
4809                bfa_trc(diag, res->addr);
4810                bfa_trc(diag, res->exp);
4811                bfa_trc(diag, res->act);
4812                bfa_trc(diag, res->err_status);
4813                bfa_trc(diag, res->err_status1);
4814                bfa_trc(diag, res->err_addr);
4815        }
4816        diag->timer_active = 0;
4817        diag->cbfn(diag->cbarg, diag->status);
4818        diag->block = 0;
4819}
4820
4821/*
4822 * Firmware ping
4823 */
4824
4825/*
4826 * Perform DMA test directly
4827 */
4828static void
4829diag_fwping_send(struct bfa_diag_s *diag)
4830{
4831        struct bfi_diag_fwping_req_s *fwping_req;
4832        u32     i;
4833
4834        bfa_trc(diag, diag->fwping.dbuf_pa);
4835
4836        /* fill DMA area with pattern */
4837        for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4838                *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4839
4840        /* Fill mbox msg */
4841        fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4842
4843        /* Setup SG list */
4844        bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4845                        diag->fwping.dbuf_pa);
4846        /* Set up dma count */
4847        fwping_req->count = cpu_to_be32(diag->fwping.count);
4848        /* Set up data pattern */
4849        fwping_req->data = diag->fwping.data;
4850
4851        /* build host command */
4852        bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4853                bfa_ioc_portid(diag->ioc));
4854
4855        /* send mbox cmd */
4856        bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4857}
4858
4859static void
4860diag_fwping_comp(struct bfa_diag_s *diag,
4861                 struct bfi_diag_fwping_rsp_s *diag_rsp)
4862{
4863        u32     rsp_data = diag_rsp->data;
4864        u8      rsp_dma_status = diag_rsp->dma_status;
4865
4866        bfa_trc(diag, rsp_data);
4867        bfa_trc(diag, rsp_dma_status);
4868
4869        if (rsp_dma_status == BFA_STATUS_OK) {
4870                u32     i, pat;
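                /*
                 * The firmware inverts the DMA pattern on each ping
                 * iteration, so after an odd 'count' the buffer is
                 * expected to hold ~data rather than data.
                 */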
4871                pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4872                        diag->fwping.data;
4873                /* Check mbox data */
4874                if (diag->fwping.data != rsp_data) {
4875                        bfa_trc(diag, rsp_data);
4876                        diag->fwping.result->dmastatus =
4877                                        BFA_STATUS_DATACORRUPTED;
4878                        diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4879                        diag->fwping.cbfn(diag->fwping.cbarg,
4880                                        diag->fwping.status);
4881                        diag->fwping.lock = 0;
4882                        return;
4883                }
4884                /* Check dma pattern */
4885                for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4886                        if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4887                                bfa_trc(diag, i);
4888                                bfa_trc(diag, pat);
4889                                bfa_trc(diag,
4890                                        *((u32 *)diag->fwping.dbuf_kva + i));
4891                                diag->fwping.result->dmastatus =
4892                                                BFA_STATUS_DATACORRUPTED;
4893                                diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4894                                diag->fwping.cbfn(diag->fwping.cbarg,
4895                                                diag->fwping.status);
4896                                diag->fwping.lock = 0;
4897                                return;
4898                        }
4899                }
4900                diag->fwping.result->dmastatus = BFA_STATUS_OK;
4901                diag->fwping.status = BFA_STATUS_OK;
4902                diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4903                diag->fwping.lock = 0;
4904        } else {
4905                diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4906                diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4907                diag->fwping.lock = 0;
4908        }
4909}
4910
4911/*
4912 * Temperature Sensor
4913 */
4914
4915static void
4916diag_tempsensor_send(struct bfa_diag_s *diag)
4917{
4918        struct bfi_diag_ts_req_s *msg;
4919
4920        msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4921        bfa_trc(diag, msg->temp);
4922        /* build host command */
4923        bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4924                bfa_ioc_portid(diag->ioc));
4925        /* send mbox cmd */
4926        bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4927}
4928
4929static void
4930diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4931{
4932        if (!diag->tsensor.lock) {
4933                /* receiving response after ioc failure */
4934                bfa_trc(diag, diag->tsensor.lock);
4935                return;
4936        }
4937
4938        /*
4939         * The ASIC junction tempsensor is a register read operation,
4940         * so it always returns OK.
4941         */
4942        diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4943        diag->tsensor.temp->ts_junc = rsp->ts_junc;
4944        diag->tsensor.temp->ts_brd = rsp->ts_brd;
4945
4946        if (rsp->ts_brd) {
4947                /* tsensor.temp->status is brd_temp status */
4948                diag->tsensor.temp->status = rsp->status;
4949                if (rsp->status == BFA_STATUS_OK) {
4950                        diag->tsensor.temp->brd_temp =
4951                                be16_to_cpu(rsp->brd_temp);
4952                } else
4953                        diag->tsensor.temp->brd_temp = 0;
4954        }
4955
4956        bfa_trc(diag, rsp->status);
4957        bfa_trc(diag, rsp->ts_junc);
4958        bfa_trc(diag, rsp->temp);
4959        bfa_trc(diag, rsp->ts_brd);
4960        bfa_trc(diag, rsp->brd_temp);
4961
4962        /* tsensor status is always good because junction temp is always available */
4963        diag->tsensor.status = BFA_STATUS_OK;
4964        diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4965        diag->tsensor.lock = 0;
4966}
4967
4968/*
4969 *      LED Test command
4970 */
4971static void
4972diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4973{
4974        struct bfi_diag_ledtest_req_s  *msg;
4975
4976        msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4977        /* build host command */
4978        bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4979                        bfa_ioc_portid(diag->ioc));
4980
4981        /*
4982         * Convert the freq from N blinks per 10 sec to the crossbow ontime
4983         * value (500 / N, e.g. 10 blinks/10 sec -> 50); division is needed here.
4984         */
4985        if (ledtest->freq)
4986                ledtest->freq = 500 / ledtest->freq;
4987
4988        if (ledtest->freq == 0)
4989                ledtest->freq = 1;
4990
4991        bfa_trc(diag, ledtest->freq);
4993        msg->cmd = (u8) ledtest->cmd;
4994        msg->color = (u8) ledtest->color;
4995        msg->portid = bfa_ioc_portid(diag->ioc);
4996        msg->led = ledtest->led;
4997        msg->freq = cpu_to_be16(ledtest->freq);
4998
4999        /* send mbox cmd */
5000        bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
5001}
5002
5003static void
5004diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
5005{
5006        bfa_trc(diag, diag->ledtest.lock);
5007        diag->ledtest.lock = BFA_FALSE;
5008        /* no bfa_cb_queue is needed because the driver is not waiting */
5009}
5010
5011/*
5012 * Port beaconing
5013 */
5014static void
5015diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
5016{
5017        struct bfi_diag_portbeacon_req_s *msg;
5018
5019        msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
5020        /* build host command */
5021        bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
5022                bfa_ioc_portid(diag->ioc));
5023        msg->beacon = beacon;
5024        msg->period = cpu_to_be32(sec);
5025        /* send mbox cmd */
5026        bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
5027}
5028
5029static void
5030diag_portbeacon_comp(struct bfa_diag_s *diag)
5031{
5032        bfa_trc(diag, diag->beacon.state);
5033        diag->beacon.state = BFA_FALSE;
5034        if (diag->cbfn_beacon)
5035                diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
5036}
5037
5038/*
5039 *      Diag mbox message handler
5040 */
5041void
5042bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
5043{
5044        struct bfa_diag_s *diag = diagarg;
5045
5046        switch (msg->mh.msg_id) {
5047        case BFI_DIAG_I2H_PORTBEACON:
5048                diag_portbeacon_comp(diag);
5049                break;
5050        case BFI_DIAG_I2H_FWPING:
5051                diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
5052                break;
5053        case BFI_DIAG_I2H_TEMPSENSOR:
5054                diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
5055                break;
5056        case BFI_DIAG_I2H_LEDTEST:
5057                diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
5058                break;
5059        default:
5060                bfa_trc(diag, msg->mh.msg_id);
5061                WARN_ON(1);
5062        }
5063}
5064
5065/*
5066 * Gen RAM Test
5067 *
5068 *   @param[in] *diag           - diag data struct
5069 *   @param[in] *memtest        - mem test params input from upper layer
5070 *   @param[in] pattern         - mem test pattern
5071 *   @param[in] *result         - mem test result
5072 *   @param[in] cbfn            - mem test callback function
5073 *   @param[in] cbarg           - callback function arg
5074 *
5075 *   @param[out]
5076 */
5077bfa_status_t
5078bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
5079                u32 pattern, struct bfa_diag_memtest_result *result,
5080                bfa_cb_diag_t cbfn, void *cbarg)
5081{
5082        u32     memtest_tov;
5083
5084        bfa_trc(diag, pattern);
5085
5086        if (!bfa_ioc_adapter_is_disabled(diag->ioc))
5087                return BFA_STATUS_ADAPTER_ENABLED;
5088
5089        /* check to see if there is another destructive diag cmd running */
5090        if (diag->block) {
5091                bfa_trc(diag, diag->block);
5092                return BFA_STATUS_DEVBUSY;
5093        } else
5094                diag->block = 1;
5095
5096        diag->result = result;
5097        diag->cbfn = cbfn;
5098        diag->cbarg = cbarg;
5099
5100        /* download memtest code and take LPU0 out of reset */
5101        bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
5102
5103        memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
5104                       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
5105        bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
5106                        bfa_diag_memtest_done, diag, memtest_tov);
5107        diag->timer_active = 1;
5108        return BFA_STATUS_OK;
5109}
5110
5111/*
5112 * DIAG firmware ping command
5113 *
5114 *   @param[in] *diag           - diag data struct
5115 *   @param[in] cnt             - dma loop count for testing PCIE
5116 *   @param[in] data            - data pattern to pass in fw
5117 *   @param[in] *result         - pointer to bfa_diag_fwping_result_t data struct
5118 *   @param[in] cbfn            - callback function
5119 *   @param[in] *cbarg          - callback function arg
5120 *
5121 *   @param[out]
5122 */
5123bfa_status_t
5124bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
5125                struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
5126                void *cbarg)
5127{
5128        bfa_trc(diag, cnt);
5129        bfa_trc(diag, data);
5130
5131        if (!bfa_ioc_is_operational(diag->ioc))
5132                return BFA_STATUS_IOC_NON_OP;
5133
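            /* fwping is not supported on the ethernet function of a CT2 ASIC */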
5134        if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
5135            ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
5136                return BFA_STATUS_CMD_NOTSUPP;
5137
5138        /* check to see if there is another destructive diag cmd running */
5139        if (diag->block || diag->fwping.lock) {
5140                bfa_trc(diag, diag->block);
5141                bfa_trc(diag, diag->fwping.lock);
5142                return BFA_STATUS_DEVBUSY;
5143        }
5144
5145        /* Initialization */
5146        diag->fwping.lock = 1;
5147        diag->fwping.cbfn = cbfn;
5148        diag->fwping.cbarg = cbarg;
5149        diag->fwping.result = result;
5150        diag->fwping.data = data;
5151        diag->fwping.count = cnt;
5152
5153        /* Init test results */
5154        diag->fwping.result->data = 0;
5155        diag->fwping.result->status = BFA_STATUS_OK;
5156
5157        /* kick off the first ping */
5158        diag_fwping_send(diag);
5159        return BFA_STATUS_OK;
5160}
5161
5162/*
5163 * Read Temperature Sensor
5164 *
5165 *   @param[in] *diag           - diag data struct
5166 *   @param[in] *result         - pointer to bfa_diag_temp_t data struct
5167 *   @param[in] cbfn            - callback function
5168 *   @param[in] *cbarg          - callback function arg
5169 *
5170 *   @param[out]
5171 */
5172bfa_status_t
5173bfa_diag_tsensor_query(struct bfa_diag_s *diag,
5174                struct bfa_diag_results_tempsensor_s *result,
5175                bfa_cb_diag_t cbfn, void *cbarg)
5176{
5177        /* check to see if there is a destructive diag cmd running */
5178        if (diag->block || diag->tsensor.lock) {
5179                bfa_trc(diag, diag->block);
5180                bfa_trc(diag, diag->tsensor.lock);
5181                return BFA_STATUS_DEVBUSY;
5182        }
5183
5184        if (!bfa_ioc_is_operational(diag->ioc))
5185                return BFA_STATUS_IOC_NON_OP;
5186
5187        /* Init diag mod params */
5188        diag->tsensor.lock = 1;
5189        diag->tsensor.temp = result;
5190        diag->tsensor.cbfn = cbfn;
5191        diag->tsensor.cbarg = cbarg;
5192        diag->tsensor.status = BFA_STATUS_OK;
5193
5194        /* Send msg to fw */
5195        diag_tempsensor_send(diag);
5196
5197        return BFA_STATUS_OK;
5198}
5199
5200/*
5201 * LED Test command
5202 *
5203 *   @param[in] *diag           - diag data struct
5204 *   @param[in] *ledtest        - pointer to ledtest data structure
5205 *
5206 *   @param[out]
5207 */
5208bfa_status_t
5209bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5210{
5211        bfa_trc(diag, ledtest->cmd);
5212
5213        if (!bfa_ioc_is_operational(diag->ioc))
5214                return BFA_STATUS_IOC_NON_OP;
5215
5216        if (diag->beacon.state)
5217                return BFA_STATUS_BEACON_ON;
5218
5219        if (diag->ledtest.lock)
5220                return BFA_STATUS_LEDTEST_OP;
5221
5222        /* Send msg to fw */
5223        diag->ledtest.lock = BFA_TRUE;
5224        diag_ledtest_send(diag, ledtest);
5225
5226        return BFA_STATUS_OK;
5227}
5228
5229/*
5230 * Port beaconing command
5231 *
5232 *   @param[in] *diag           - diag data struct
5233 *   @param[in] beacon          - port beaconing 1:ON   0:OFF
5234 *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
5235 *   @param[in] sec             - beaconing duration in seconds
5236 *
5237 *   @param[out]
5238 */
5239bfa_status_t
5240bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5241                bfa_boolean_t link_e2e_beacon, uint32_t sec)
5242{
5243        bfa_trc(diag, beacon);
5244        bfa_trc(diag, link_e2e_beacon);
5245        bfa_trc(diag, sec);
5246
5247        if (!bfa_ioc_is_operational(diag->ioc))
5248                return BFA_STATUS_IOC_NON_OP;
5249
5250        if (diag->ledtest.lock)
5251                return BFA_STATUS_LEDTEST_OP;
5252
5253        if (diag->beacon.state && beacon)       /* beacon already on */
5254                return BFA_STATUS_BEACON_ON;
5255
5256        diag->beacon.state      = beacon;
5257        diag->beacon.link_e2e   = link_e2e_beacon;
5258        if (diag->cbfn_beacon)
5259                diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5260
5261        /* Send msg to fw */
5262        diag_portbeacon_send(diag, beacon, sec);
5263
5264        return BFA_STATUS_OK;
5265}
5266
5267/*
5268 * Return DMA memory needed by diag module.
5269 */
5270u32
5271bfa_diag_meminfo(void)
5272{
5273        return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5274}
5275
5276/*
5277 *      Attach virtual and physical memory for Diag.
5278 */
5279void
5280bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5281        bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5282{
5283        diag->dev = dev;
5284        diag->ioc = ioc;
5285        diag->trcmod = trcmod;
5286
5287        diag->block = 0;
5288        diag->cbfn = NULL;
5289        diag->cbarg = NULL;
5290        diag->result = NULL;
5291        diag->cbfn_beacon = cbfn_beacon;
5292
5293        bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5294        bfa_q_qe_init(&diag->ioc_notify);
5295        bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5296        list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5297}
5298
5299void
5300bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5301{
5302        diag->fwping.dbuf_kva = dm_kva;
5303        diag->fwping.dbuf_pa = dm_pa;
5304        memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5305}
5306
5307/*
5308 *      PHY module specific
5309 */
5310#define BFA_PHY_DMA_BUF_SZ      0x02000         /* 8k dma buffer */
5311#define BFA_PHY_LOCK_STATUS     0x018878        /* phy semaphore status reg */
5312
5313static void
5314bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5315{
5316        int i, m = sz >> 2;
5317
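            /* sz is in bytes; convert sz/4 big-endian 32-bit words to host order */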
5318        for (i = 0; i < m; i++)
5319                obuf[i] = be32_to_cpu(ibuf[i]);
5320}
5321
5322static bfa_boolean_t
5323bfa_phy_present(struct bfa_phy_s *phy)
5324{
5325        return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5326}
5327
5328static void
5329bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5330{
5331        struct bfa_phy_s *phy = cbarg;
5332
5333        bfa_trc(phy, event);
5334
5335        switch (event) {
5336        case BFA_IOC_E_DISABLED:
5337        case BFA_IOC_E_FAILED:
5338                if (phy->op_busy) {
5339                        phy->status = BFA_STATUS_IOC_FAILURE;
5340                        phy->cbfn(phy->cbarg, phy->status);
5341                        phy->op_busy = 0;
5342                }
5343                break;
5344
5345        default:
5346                break;
5347        }
5348}
5349
5350/*
5351 * Send phy attribute query request.
5352 *
5353 * @param[in] cbarg - callback argument
5354 */
5355static void
5356bfa_phy_query_send(void *cbarg)
5357{
5358        struct bfa_phy_s *phy = cbarg;
5359        struct bfi_phy_query_req_s *msg =
5360                        (struct bfi_phy_query_req_s *) phy->mb.msg;
5361
5362        msg->instance = phy->instance;
5363        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5364                bfa_ioc_portid(phy->ioc));
5365        bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5366        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5367}
5368
5369/*
5370 * Send phy write request.
5371 *
5372 * @param[in] cbarg - callback argument
5373 */
5374static void
5375bfa_phy_write_send(void *cbarg)
5376{
5377        struct bfa_phy_s *phy = cbarg;
5378        struct bfi_phy_write_req_s *msg =
5379                        (struct bfi_phy_write_req_s *) phy->mb.msg;
5380        u32     len;
5381        u16     *buf, *dbuf;
5382        int     i, sz;
5383
5384        msg->instance = phy->instance;
5385        msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5386        len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5387                        phy->residue : BFA_PHY_DMA_BUF_SZ;
5388        msg->length = cpu_to_be32(len);
5389
5390        /* indicate if it's the last msg of the whole write operation */
5391        msg->last = (len == phy->residue) ? 1 : 0;
5392
5393        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5394                bfa_ioc_portid(phy->ioc));
5395        bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5396
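            /* swap host data to big-endian and stage it in the DMA buffer */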
5397        buf = (u16 *) (phy->ubuf + phy->offset);
5398        dbuf = (u16 *)phy->dbuf_kva;
5399        sz = len >> 1;
5400        for (i = 0; i < sz; i++)
5401                dbuf[i] = cpu_to_be16(buf[i]);
5402
5403        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5404
5405        phy->residue -= len;
5406        phy->offset += len;
5407}
5408
5409/*
5410 * Send phy read request.
5411 *
5412 * @param[in] cbarg - callback argument
5413 */
5414static void
5415bfa_phy_read_send(void *cbarg)
5416{
5417        struct bfa_phy_s *phy = cbarg;
5418        struct bfi_phy_read_req_s *msg =
5419                        (struct bfi_phy_read_req_s *) phy->mb.msg;
5420        u32     len;
5421
5422        msg->instance = phy->instance;
5423        msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5424        len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5425                        phy->residue : BFA_PHY_DMA_BUF_SZ;
5426        msg->length = cpu_to_be32(len);
5427        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5428                bfa_ioc_portid(phy->ioc));
5429        bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5430        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5431}
5432
5433/*
5434 * Send phy stats request.
5435 *
5436 * @param[in] cbarg - callback argument
5437 */
5438static void
5439bfa_phy_stats_send(void *cbarg)
5440{
5441        struct bfa_phy_s *phy = cbarg;
5442        struct bfi_phy_stats_req_s *msg =
5443                        (struct bfi_phy_stats_req_s *) phy->mb.msg;
5444
5445        msg->instance = phy->instance;
5446        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5447                bfa_ioc_portid(phy->ioc));
5448        bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5449        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5450}
5451
5452/*
5453 * Phy memory info API.
5454 *
5455 * @param[in] mincfg - minimal cfg variable
5456 */
5457u32
5458bfa_phy_meminfo(bfa_boolean_t mincfg)
5459{
5460        /* min driver doesn't need phy */
5461        if (mincfg)
5462                return 0;
5463
5464        return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5465}
5466
5467/*
5468 * Phy attach API.
5469 *
5470 * @param[in] phy - phy structure
5471 * @param[in] ioc  - ioc structure
5472 * @param[in] dev  - device structure
5473 * @param[in] trcmod - trace module
5474 * @param[in] mincfg - minimal cfg variable
5475 */
5476void
5477bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5478                struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5479{
5480        phy->ioc = ioc;
5481        phy->trcmod = trcmod;
5482        phy->cbfn = NULL;
5483        phy->cbarg = NULL;
5484        phy->op_busy = 0;
5485
5486        bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5487        bfa_q_qe_init(&phy->ioc_notify);
5488        bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5489        list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5490
5491        /* min driver doesn't need phy */
5492        if (mincfg) {
5493                phy->dbuf_kva = NULL;
5494                phy->dbuf_pa = 0;
5495        }
5496}
5497
5498/*
5499 * Claim memory for phy
5500 *
5501 * @param[in] phy - phy structure
5502 * @param[in] dm_kva - pointer to virtual memory address
5503 * @param[in] dm_pa - physical memory address
5504 * @param[in] mincfg - minimal cfg variable
5505 */
5506void
5507bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5508                bfa_boolean_t mincfg)
5509{
5510        if (mincfg)
5511                return;
5512
5513        phy->dbuf_kva = dm_kva;
5514        phy->dbuf_pa = dm_pa;
5515        memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5516        dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5517        dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5518}
5519
5520bfa_boolean_t
5521bfa_phy_busy(struct bfa_ioc_s *ioc)
5522{
5523        void __iomem    *rb;
5524
5525        rb = bfa_ioc_bar0(ioc);
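            /* a nonzero lock status means the phy semaphore is currently held */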
5526        return readl(rb + BFA_PHY_LOCK_STATUS);
5527}
5528
5529/*
5530 * Get phy attribute.
5531 *
5532 * @param[in] phy - phy structure
5533 * @param[in] attr - phy attribute structure
5534 * @param[in] cbfn - callback function
5535 * @param[in] cbarg - callback argument
5536 *
5537 * Return status.
5538 */
5539bfa_status_t
5540bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5541                struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5542{
5543        bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5544        bfa_trc(phy, instance);
5545
5546        if (!bfa_phy_present(phy))
5547                return BFA_STATUS_PHY_NOT_PRESENT;
5548
5549        if (!bfa_ioc_is_operational(phy->ioc))
5550                return BFA_STATUS_IOC_NON_OP;
5551
5552        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5553                bfa_trc(phy, phy->op_busy);
5554                return BFA_STATUS_DEVBUSY;
5555        }
5556
5557        phy->op_busy = 1;
5558        phy->cbfn = cbfn;
5559        phy->cbarg = cbarg;
5560        phy->instance = instance;
5561        phy->ubuf = (uint8_t *) attr;
5562        bfa_phy_query_send(phy);
5563
5564        return BFA_STATUS_OK;
5565}
5566
5567/*
5568 * Get phy stats.
5569 *
5570 * @param[in] phy - phy structure
5571 * @param[in] instance - phy image instance
5572 * @param[in] stats - pointer to phy stats
5573 * @param[in] cbfn - callback function
5574 * @param[in] cbarg - callback argument
5575 *
5576 * Return status.
5577 */
5578bfa_status_t
5579bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5580                struct bfa_phy_stats_s *stats,
5581                bfa_cb_phy_t cbfn, void *cbarg)
5582{
5583        bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5584        bfa_trc(phy, instance);
5585
5586        if (!bfa_phy_present(phy))
5587                return BFA_STATUS_PHY_NOT_PRESENT;
5588
5589        if (!bfa_ioc_is_operational(phy->ioc))
5590                return BFA_STATUS_IOC_NON_OP;
5591
5592        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5593                bfa_trc(phy, phy->op_busy);
5594                return BFA_STATUS_DEVBUSY;
5595        }
5596
5597        phy->op_busy = 1;
5598        phy->cbfn = cbfn;
5599        phy->cbarg = cbarg;
5600        phy->instance = instance;
5601        phy->ubuf = (u8 *) stats;
5602        bfa_phy_stats_send(phy);
5603
5604        return BFA_STATUS_OK;
5605}
5606
5607/*
5608 * Update phy image.
5609 *
5610 * @param[in] phy - phy structure
5611 * @param[in] instance - phy image instance
5612 * @param[in] buf - update data buffer
5613 * @param[in] len - data buffer length
5614 * @param[in] offset - offset relative to starting address
5615 * @param[in] cbfn - callback function
5616 * @param[in] cbarg - callback argument
5617 *
5618 * Return status.
5619 */
5620bfa_status_t
5621bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5622                void *buf, u32 len, u32 offset,
5623                bfa_cb_phy_t cbfn, void *cbarg)
5624{
5625        bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5626        bfa_trc(phy, instance);
5627        bfa_trc(phy, len);
5628        bfa_trc(phy, offset);
5629
5630        if (!bfa_phy_present(phy))
5631                return BFA_STATUS_PHY_NOT_PRESENT;
5632
5633        if (!bfa_ioc_is_operational(phy->ioc))
5634                return BFA_STATUS_IOC_NON_OP;
5635
5636        /* 'len' must be on a word (4-byte) boundary */
5637        if (!len || (len & 0x03))
5638                return BFA_STATUS_FAILED;
5639
5640        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5641                bfa_trc(phy, phy->op_busy);
5642                return BFA_STATUS_DEVBUSY;
5643        }
5644
5645        phy->op_busy = 1;
5646        phy->cbfn = cbfn;
5647        phy->cbarg = cbarg;
5648        phy->instance = instance;
5649        phy->residue = len;
5650        phy->offset = 0;
5651        phy->addr_off = offset;
5652        phy->ubuf = buf;
5653
5654        bfa_phy_write_send(phy);
5655        return BFA_STATUS_OK;
5656}
5657
5658/*
5659 * Read phy image.
5660 *
5661 * @param[in] phy - phy structure
5662 * @param[in] instance - phy image instance
5663 * @param[in] buf - read data buffer
5664 * @param[in] len - data buffer length
5665 * @param[in] offset - offset relative to starting address
5666 * @param[in] cbfn - callback function
5667 * @param[in] cbarg - callback argument
5668 *
5669 * Return status.
5670 */
5671bfa_status_t
5672bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5673                void *buf, u32 len, u32 offset,
5674                bfa_cb_phy_t cbfn, void *cbarg)
5675{
5676        bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5677        bfa_trc(phy, instance);
5678        bfa_trc(phy, len);
5679        bfa_trc(phy, offset);
5680
5681        if (!bfa_phy_present(phy))
5682                return BFA_STATUS_PHY_NOT_PRESENT;
5683
5684        if (!bfa_ioc_is_operational(phy->ioc))
5685                return BFA_STATUS_IOC_NON_OP;
5686
5687        /* 'len' must be on a word (4-byte) boundary */
5688        if (!len || (len & 0x03))
5689                return BFA_STATUS_FAILED;
5690
5691        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5692                bfa_trc(phy, phy->op_busy);
5693                return BFA_STATUS_DEVBUSY;
5694        }
5695
5696        phy->op_busy = 1;
5697        phy->cbfn = cbfn;
5698        phy->cbarg = cbarg;
5699        phy->instance = instance;
5700        phy->residue = len;
5701        phy->offset = 0;
5702        phy->addr_off = offset;
5703        phy->ubuf = buf;
5704        bfa_phy_read_send(phy);
5705
5706        return BFA_STATUS_OK;
5707}
5708
5709/*
5710 * Process phy response messages upon receiving interrupts.
5711 *
5712 * @param[in] phyarg - phy structure
5713 * @param[in] msg - message structure
5714 */
5715void
5716bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5717{
5718        struct bfa_phy_s *phy = phyarg;
5719        u32     status;
5720
5721        union {
5722                struct bfi_phy_query_rsp_s *query;
5723                struct bfi_phy_stats_rsp_s *stats;
5724                struct bfi_phy_write_rsp_s *write;
5725                struct bfi_phy_read_rsp_s *read;
5726                struct bfi_mbmsg_s   *msg;
5727        } m;
5728
5729        m.msg = msg;
5730        bfa_trc(phy, msg->mh.msg_id);
5731
5732        if (!phy->op_busy) {
5733                /* receiving response after ioc failure */
5734                bfa_trc(phy, 0x9999);
5735                return;
5736        }
5737
5738        switch (msg->mh.msg_id) {
5739        case BFI_PHY_I2H_QUERY_RSP:
5740                status = be32_to_cpu(m.query->status);
5741                bfa_trc(phy, status);
5742
5743                if (status == BFA_STATUS_OK) {
5744                        struct bfa_phy_attr_s *attr =
5745                                (struct bfa_phy_attr_s *) phy->ubuf;
5746                        bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5747                                        sizeof(struct bfa_phy_attr_s));
5748                        bfa_trc(phy, attr->status);
5749                        bfa_trc(phy, attr->length);
5750                }
5751
5752                phy->status = status;
5753                phy->op_busy = 0;
5754                if (phy->cbfn)
5755                        phy->cbfn(phy->cbarg, phy->status);
5756                break;
5757        case BFI_PHY_I2H_STATS_RSP:
5758                status = be32_to_cpu(m.stats->status);
5759                bfa_trc(phy, status);
5760
5761                if (status == BFA_STATUS_OK) {
5762                        struct bfa_phy_stats_s *stats =
5763                                (struct bfa_phy_stats_s *) phy->ubuf;
5764                        bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5765                                sizeof(struct bfa_phy_stats_s));
5766                        bfa_trc(phy, stats->status);
5767                }
5768
5769                phy->status = status;
5770                phy->op_busy = 0;
5771                if (phy->cbfn)
5772                        phy->cbfn(phy->cbarg, phy->status);
5773                break;
5774        case BFI_PHY_I2H_WRITE_RSP:
5775                status = be32_to_cpu(m.write->status);
5776                bfa_trc(phy, status);
5777
5778                if (status != BFA_STATUS_OK || phy->residue == 0) {
5779                        phy->status = status;
5780                        phy->op_busy = 0;
5781                        if (phy->cbfn)
5782                                phy->cbfn(phy->cbarg, phy->status);
5783                } else {
5784                        bfa_trc(phy, phy->offset);
5785                        bfa_phy_write_send(phy);
5786                }
5787                break;
5788        case BFI_PHY_I2H_READ_RSP:
5789                status = be32_to_cpu(m.read->status);
5790                bfa_trc(phy, status);
5791
5792                if (status != BFA_STATUS_OK) {
5793                        phy->status = status;
5794                        phy->op_busy = 0;
5795                        if (phy->cbfn)
5796                                phy->cbfn(phy->cbarg, phy->status);
5797                } else {
5798                        u32 len = be32_to_cpu(m.read->length);
5799                        u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5800                        u16 *dbuf = (u16 *)phy->dbuf_kva;
5801                        int i, sz = len >> 1;
5802
5803                        bfa_trc(phy, phy->offset);
5804                        bfa_trc(phy, len);
5805
5806                        for (i = 0; i < sz; i++)
5807                                buf[i] = be16_to_cpu(dbuf[i]);
5808
5809                        phy->residue -= len;
5810                        phy->offset += len;
5811
5812                        if (phy->residue == 0) {
5813                                phy->status = status;
5814                                phy->op_busy = 0;
5815                                if (phy->cbfn)
5816                                        phy->cbfn(phy->cbarg, phy->status);
5817                        } else
5818                                bfa_phy_read_send(phy);
5819                }
5820                break;
5821        default:
5822                WARN_ON(1);
5823        }
5824}
5825
5826/*
5827 * DCONF state machine events
5828 */
5829enum bfa_dconf_event {
5830        BFA_DCONF_SM_INIT               = 1,    /* dconf Init */
5831        BFA_DCONF_SM_FLASH_COMP         = 2,    /* flash read/write completed */
5832        BFA_DCONF_SM_WR                 = 3,    /* binding change, map */
5833        BFA_DCONF_SM_TIMEOUT            = 4,    /* timer expiry */
5834        BFA_DCONF_SM_EXIT               = 5,    /* exit dconf module */
5835        BFA_DCONF_SM_IOCDISABLE         = 6,    /* IOC disable event */
5836};
5837
5838/* forward declaration of DCONF state machine */
5839static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5840                                enum bfa_dconf_event event);
5841static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5842                                enum bfa_dconf_event event);
5843static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5844                                enum bfa_dconf_event event);
5845static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5846                                enum bfa_dconf_event event);
5847static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5848                                enum bfa_dconf_event event);
5849static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5850                                enum bfa_dconf_event event);
5851static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5852                                enum bfa_dconf_event event);
5853
5854static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5855static void bfa_dconf_timer(void *cbarg);
5856static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5857static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5858
5859/*
5860 * Beginning state of dconf module. Waiting for an event to start.
5861 */
5862static void
5863bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5864{
5865        bfa_status_t bfa_status;
5866        bfa_trc(dconf->bfa, event);
5867
5868        switch (event) {
5869        case BFA_DCONF_SM_INIT:
5870                if (dconf->min_cfg) {
5871                        bfa_trc(dconf->bfa, dconf->min_cfg);
5872                        bfa_fsm_send_event(&dconf->bfa->iocfc,
5873                                        IOCFC_E_DCONF_DONE);
5874                        return;
5875                }
5876                bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5877                bfa_timer_start(dconf->bfa, &dconf->timer,
5878                        bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5879                bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5880                                        BFA_FLASH_PART_DRV, dconf->instance,
5881                                        dconf->dconf,
5882                                        sizeof(struct bfa_dconf_s), 0,
5883                                        bfa_dconf_init_cb, dconf->bfa);
5884                if (bfa_status != BFA_STATUS_OK) {
5885                        bfa_timer_stop(&dconf->timer);
5886                        bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5887                        bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5888                        return;
5889                }
5890                break;
5891        case BFA_DCONF_SM_EXIT:
5892                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5893        case BFA_DCONF_SM_IOCDISABLE:
5894        case BFA_DCONF_SM_WR:
5895        case BFA_DCONF_SM_FLASH_COMP:
5896                break;
5897        default:
5898                bfa_sm_fault(dconf->bfa, event);
5899        }
5900}
5901
5902/*
5903 * Read flash for dconf entries and make a call back to the driver once done.
5904 */
5905static void
5906bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5907                        enum bfa_dconf_event event)
5908{
5909        bfa_trc(dconf->bfa, event);
5910
5911        switch (event) {
5912        case BFA_DCONF_SM_FLASH_COMP:
5913                bfa_timer_stop(&dconf->timer);
5914                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5915                break;
5916        case BFA_DCONF_SM_TIMEOUT:
5917                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5918                bfa_ioc_suspend(&dconf->bfa->ioc);
5919                break;
5920        case BFA_DCONF_SM_EXIT:
5921                bfa_timer_stop(&dconf->timer);
5922                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5923                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5924                break;
5925        case BFA_DCONF_SM_IOCDISABLE:
5926                bfa_timer_stop(&dconf->timer);
5927                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5928                break;
5929        default:
5930                bfa_sm_fault(dconf->bfa, event);
5931        }
5932}
5933
5934/*
5935 * DCONF Module is in ready state. Has completed the initialization.
5936 */
5937static void
5938bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5939{
5940        bfa_trc(dconf->bfa, event);
5941
5942        switch (event) {
5943        case BFA_DCONF_SM_WR:
5944                bfa_timer_start(dconf->bfa, &dconf->timer,
5945                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5946                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5947                break;
5948        case BFA_DCONF_SM_EXIT:
5949                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5950                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5951                break;
5952        case BFA_DCONF_SM_INIT:
5953        case BFA_DCONF_SM_IOCDISABLE:
5954                break;
5955        default:
5956                bfa_sm_fault(dconf->bfa, event);
5957        }
5958}
5959
5960/*
5961 * entries are dirty, write back to the flash.
5962 */
5963
5964static void
5965bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5966{
5967        bfa_trc(dconf->bfa, event);
5968
5969        switch (event) {
5970        case BFA_DCONF_SM_TIMEOUT:
5971                bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5972                bfa_dconf_flash_write(dconf);
5973                break;
5974        case BFA_DCONF_SM_WR:
5975                bfa_timer_stop(&dconf->timer);
5976                bfa_timer_start(dconf->bfa, &dconf->timer,
5977                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5978                break;
5979        case BFA_DCONF_SM_EXIT:
5980                bfa_timer_stop(&dconf->timer);
5981                bfa_timer_start(dconf->bfa, &dconf->timer,
5982                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5983                bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5984                bfa_dconf_flash_write(dconf);
5985                break;
5986        case BFA_DCONF_SM_FLASH_COMP:
5987                break;
5988        case BFA_DCONF_SM_IOCDISABLE:
5989                bfa_timer_stop(&dconf->timer);
5990                bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5991                break;
5992        default:
5993                bfa_sm_fault(dconf->bfa, event);
5994        }
5995}
5996
5997/*
5998 * Sync the dconf entries to the flash.
5999 */
6000static void
6001bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
6002                        enum bfa_dconf_event event)
6003{
6004        bfa_trc(dconf->bfa, event);
6005
6006        switch (event) {
6007        case BFA_DCONF_SM_IOCDISABLE:
6008        case BFA_DCONF_SM_FLASH_COMP:
6009                bfa_timer_stop(&dconf->timer);
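                    /* fall through */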
6010        case BFA_DCONF_SM_TIMEOUT:
6011                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6012                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6013                break;
6014        default:
6015                bfa_sm_fault(dconf->bfa, event);
6016        }
6017}
6018
6019static void
6020bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
6021{
6022        bfa_trc(dconf->bfa, event);
6023
6024        switch (event) {
6025        case BFA_DCONF_SM_FLASH_COMP:
6026                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
6027                break;
6028        case BFA_DCONF_SM_WR:
6029                bfa_timer_start(dconf->bfa, &dconf->timer,
6030                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6031                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6032                break;
6033        case BFA_DCONF_SM_EXIT:
6034                bfa_timer_start(dconf->bfa, &dconf->timer,
6035                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6036                bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
6037                break;
6038        case BFA_DCONF_SM_IOCDISABLE:
6039                bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
6040                break;
6041        default:
6042                bfa_sm_fault(dconf->bfa, event);
6043        }
6044}
6045
6046static void
6047bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
6048                        enum bfa_dconf_event event)
6049{
6050        bfa_trc(dconf->bfa, event);
6051
6052        switch (event) {
6053        case BFA_DCONF_SM_INIT:
6054                bfa_timer_start(dconf->bfa, &dconf->timer,
6055                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6056                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6057                break;
6058        case BFA_DCONF_SM_EXIT:
6059                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6060                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6061                break;
6062        case BFA_DCONF_SM_IOCDISABLE:
6063                break;
6064        default:
6065                bfa_sm_fault(dconf->bfa, event);
6066        }
6067}
6068
6069/*
6070 * Compute the memory needed by the DRV_CFG module.
6071 */
6072void
6073bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
6074                  struct bfa_s *bfa)
6075{
6076        struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
6077
6078        if (cfg->drvcfg.min_cfg)
6079                bfa_mem_kva_setup(meminfo, dconf_kva,
6080                                sizeof(struct bfa_dconf_hdr_s));
6081        else
6082                bfa_mem_kva_setup(meminfo, dconf_kva,
6083                                sizeof(struct bfa_dconf_s));
6084}
6085
6086void
6087bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg)
6088{
6089        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6090
6091        dconf->bfad = bfad;
6092        dconf->bfa = bfa;
6093        dconf->instance = bfa->ioc.port_id;
6094        bfa_trc(bfa, dconf->instance);
6095
6096        dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
6097        if (cfg->drvcfg.min_cfg) {
6098                bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
6099                dconf->min_cfg = BFA_TRUE;
6100        } else {
6101                dconf->min_cfg = BFA_FALSE;
6102                bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
6103        }
6104
6105        bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
6106        bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6107}
6108
6109static void
6110bfa_dconf_init_cb(void *arg, bfa_status_t status)
6111{
6112        struct bfa_s *bfa = arg;
6113        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6114
6115        if (status == BFA_STATUS_OK) {
6116                bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
6117                if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
6118                        dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
6119                if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
6120                        dconf->dconf->hdr.version = BFI_DCONF_VERSION;
6121        }
6122        bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6123        bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
6124}
6125
6126void
6127bfa_dconf_modinit(struct bfa_s *bfa)
6128{
6129        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6130        bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
6131}
6132
6133static void bfa_dconf_timer(void *cbarg)
6134{
6135        struct bfa_dconf_mod_s *dconf = cbarg;
6136        bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
6137}
6138
6139void
6140bfa_dconf_iocdisable(struct bfa_s *bfa)
6141{
6142        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6143        bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
6144}
6145
6146static bfa_status_t
6147bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
6148{
6149        bfa_status_t bfa_status;
6150        bfa_trc(dconf->bfa, 0);
6151
6152        bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
6153                                BFA_FLASH_PART_DRV, dconf->instance,
6154                                dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
6155                                bfa_dconf_cbfn, dconf);
6156        if (bfa_status != BFA_STATUS_OK)
6157                WARN_ON(bfa_status);
6158        bfa_trc(dconf->bfa, bfa_status);
6159
6160        return bfa_status;
6161}
6162
6163bfa_status_t
6164bfa_dconf_update(struct bfa_s *bfa)
6165{
6166        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6167        bfa_trc(dconf->bfa, 0);
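            /* flash updates cannot be issued while the IOC is down */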
6168        if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
6169                return BFA_STATUS_FAILED;
6170
6171        if (dconf->min_cfg) {
6172                bfa_trc(dconf->bfa, dconf->min_cfg);
6173                return BFA_STATUS_FAILED;
6174        }
6175
6176        bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
6177        return BFA_STATUS_OK;
6178}
6179
6180static void
6181bfa_dconf_cbfn(void *arg, bfa_status_t status)
6182{
6183        struct bfa_dconf_mod_s *dconf = arg;
6184        WARN_ON(status);
6185        bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6186}
6187
6188void
6189bfa_dconf_modexit(struct bfa_s *bfa)
6190{
6191        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6192        bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
6193}
6194
6195/*
6196 * FRU specific functions
6197 */
6198
6199#define BFA_FRU_DMA_BUF_SZ      0x02000         /* 8k dma buffer */
6200#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
6201#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
6202
6203static void
6204bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
6205{
6206        struct bfa_fru_s *fru = cbarg;
6207
6208        bfa_trc(fru, event);
6209
6210        switch (event) {
6211        case BFA_IOC_E_DISABLED:
6212        case BFA_IOC_E_FAILED:
6213                if (fru->op_busy) {
6214                        fru->status = BFA_STATUS_IOC_FAILURE;
6215                        fru->cbfn(fru->cbarg, fru->status);
6216                        fru->op_busy = 0;
6217                }
6218                break;
6219
6220        default:
6221                break;
6222        }
6223}
6224
6225/*
6226 * Send fru write request.
6227 *
6228 * @param[in] cbarg - callback argument
6229 */
6230static void
6231bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6232{
6233        struct bfa_fru_s *fru = cbarg;
6234        struct bfi_fru_write_req_s *msg =
6235                        (struct bfi_fru_write_req_s *) fru->mb.msg;
6236        u32 len;
6237
6238        msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6239        len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6240                                fru->residue : BFA_FRU_DMA_BUF_SZ;
6241        msg->length = cpu_to_be32(len);
6242
6243        /*
6244         * indicate if it's the last msg of the whole write operation
6245         */
6246        msg->last = (len == fru->residue) ? 1 : 0;
6247
6248        msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
6249        bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6250        bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6251
6252        memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6253        bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6254
6255        fru->residue -= len;
6256        fru->offset += len;
6257}
6258
6259/*
6260 * Send fru read request.
6261 *
6262 * @param[in] cbarg - callback argument
6263 */
6264static void
6265bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6266{
6267        struct bfa_fru_s *fru = cbarg;
6268        struct bfi_fru_read_req_s *msg =
6269                        (struct bfi_fru_read_req_s *) fru->mb.msg;
6270        u32 len;
6271
6272        msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6273        len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6274                                fru->residue : BFA_FRU_DMA_BUF_SZ;
6275        msg->length = cpu_to_be32(len);
6276        bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6277        bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6278        bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6279}
6280
6281/*
6282 * FRU memory info API.
6283 *
6284 * @param[in] mincfg - minimal cfg variable
6285 */
6286u32
6287bfa_fru_meminfo(bfa_boolean_t mincfg)
6288{
6289        /* min driver doesn't need fru */
6290        if (mincfg)
6291                return 0;
6292
6293        return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6294}
6295
6296/*
6297 * FRU attach API.
6298 *
6299 * @param[in] fru - fru structure
6300 * @param[in] ioc  - ioc structure
6301 * @param[in] dev  - device structure
6302 * @param[in] trcmod - trace module
6303 * @param[in] mincfg - minimal cfg variable
6304 */
6305void
6306bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6307        struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6308{
6309        fru->ioc = ioc;
6310        fru->trcmod = trcmod;
6311        fru->cbfn = NULL;
6312        fru->cbarg = NULL;
6313        fru->op_busy = 0;
6314
6315        bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6316        bfa_q_qe_init(&fru->ioc_notify);
6317        bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6318        list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6319
6320        /* min driver doesn't need fru */
6321        if (mincfg) {
6322                fru->dbuf_kva = NULL;
6323                fru->dbuf_pa = 0;
6324        }
6325}
6326
6327/*
6328 * Claim memory for fru
6329 *
6330 * @param[in] fru - fru structure
6331 * @param[in] dm_kva - pointer to virtual memory address
6332 * @param[in] dm_pa - physical memory address
6333 * @param[in] mincfg - minimal cfg variable
6334 */
6335void
6336bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6337        bfa_boolean_t mincfg)
6338{
6339        if (mincfg)
6340                return;
6341
6342        fru->dbuf_kva = dm_kva;
6343        fru->dbuf_pa = dm_pa;
6344        memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6345        dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6346        dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6347}
6348
6349/*
6350 * Update fru vpd image.
6351 *
6352 * @param[in] fru - fru structure
6353 * @param[in] buf - update data buffer
6354 * @param[in] len - data buffer length
6355 * @param[in] offset - offset relative to starting address
6356 * @param[in] cbfn - callback function
6357 * @param[in] cbarg - callback argument
6358 *
6359 * Return status.
6360 */
6361bfa_status_t
6362bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6363                  bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
6364{
6365        bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6366        bfa_trc(fru, len);
6367        bfa_trc(fru, offset);
6368
6369        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
6370                fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6371                return BFA_STATUS_FRU_NOT_PRESENT;
6372
6373        if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6374                return BFA_STATUS_CMD_NOTSUPP;
6375
6376        if (!bfa_ioc_is_operational(fru->ioc))
6377                return BFA_STATUS_IOC_NON_OP;
6378
6379        if (fru->op_busy) {
6380                bfa_trc(fru, fru->op_busy);
6381                return BFA_STATUS_DEVBUSY;
6382        }
6383
6384        fru->op_busy = 1;
6385
6386        fru->cbfn = cbfn;
6387        fru->cbarg = cbarg;
6388        fru->residue = len;
6389        fru->offset = 0;
6390        fru->addr_off = offset;
6391        fru->ubuf = buf;
6392        fru->trfr_cmpl = trfr_cmpl;
6393
6394        bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6395
6396        return BFA_STATUS_OK;
6397}
6398
6399/*
6400 * Read fru vpd image.
6401 *
6402 * @param[in] fru - fru structure
6403 * @param[in] buf - read data buffer
6404 * @param[in] len - data buffer length
6405 * @param[in] offset - offset relative to starting address
6406 * @param[in] cbfn - callback function
6407 * @param[in] cbarg - callback argument
6408 *
6409 * Return status.
6410 */
6411bfa_status_t
6412bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6413                bfa_cb_fru_t cbfn, void *cbarg)
6414{
6415        bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6416        bfa_trc(fru, len);
6417        bfa_trc(fru, offset);
6418
6419        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6420                return BFA_STATUS_FRU_NOT_PRESENT;
6421
6422        if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
6423                fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6424                return BFA_STATUS_CMD_NOTSUPP;
6425
6426        if (!bfa_ioc_is_operational(fru->ioc))
6427                return BFA_STATUS_IOC_NON_OP;
6428
6429        if (fru->op_busy) {
6430                bfa_trc(fru, fru->op_busy);
6431                return BFA_STATUS_DEVBUSY;
6432        }
6433
6434        fru->op_busy = 1;
6435
6436        fru->cbfn = cbfn;
6437        fru->cbarg = cbarg;
6438        fru->residue = len;
6439        fru->offset = 0;
6440        fru->addr_off = offset;
6441        fru->ubuf = buf;
6442        bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6443
6444        return BFA_STATUS_OK;
6445}
6446
6447/*
6448 * Get maximum size of the fru vpd image.
6449 *
6450 * @param[in] fru - fru structure
6451 * @param[out] size - maximum size of fru vpd data
6452 *
6453 * Return status.
6454 */
6455bfa_status_t
6456bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6457{
6458        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6459                return BFA_STATUS_FRU_NOT_PRESENT;
6460
6461        if (!bfa_ioc_is_operational(fru->ioc))
6462                return BFA_STATUS_IOC_NON_OP;
6463
6464        if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
6465                fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
6466                *max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6467        else
6468                return BFA_STATUS_CMD_NOTSUPP;
6469        return BFA_STATUS_OK;
6470}
6471/*
6472 * tfru write.
6473 *
6474 * @param[in] fru - fru structure
6475 * @param[in] buf - update data buffer
6476 * @param[in] len - data buffer length
6477 * @param[in] offset - offset relative to starting address
6478 * @param[in] cbfn - callback function
6479 * @param[in] cbarg - callback argument
6480 *
6481 * Return status.
6482 */
6483bfa_status_t
6484bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6485               bfa_cb_fru_t cbfn, void *cbarg)
6486{
6487        bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6488        bfa_trc(fru, len);
6489        bfa_trc(fru, offset);
6490        bfa_trc(fru, *((u8 *) buf));
6491
6492        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6493                return BFA_STATUS_FRU_NOT_PRESENT;
6494
6495        if (!bfa_ioc_is_operational(fru->ioc))
6496                return BFA_STATUS_IOC_NON_OP;
6497
6498        if (fru->op_busy) {
6499                bfa_trc(fru, fru->op_busy);
6500                return BFA_STATUS_DEVBUSY;
6501        }
6502
6503        fru->op_busy = 1;
6504
6505        fru->cbfn = cbfn;
6506        fru->cbarg = cbarg;
6507        fru->residue = len;
6508        fru->offset = 0;
6509        fru->addr_off = offset;
6510        fru->ubuf = buf;
6511
6512        bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6513
6514        return BFA_STATUS_OK;
6515}
6516
6517/*
6518 * tfru read.
6519 *
6520 * @param[in] fru - fru structure
6521 * @param[in] buf - read data buffer
6522 * @param[in] len - data buffer length
6523 * @param[in] offset - offset relative to starting address
6524 * @param[in] cbfn - callback function
6525 * @param[in] cbarg - callback argument
6526 *
6527 * Return status.
6528 */
6529bfa_status_t
6530bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6531              bfa_cb_fru_t cbfn, void *cbarg)
6532{
6533        bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6534        bfa_trc(fru, len);
6535        bfa_trc(fru, offset);
6536
6537        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6538                return BFA_STATUS_FRU_NOT_PRESENT;
6539
6540        if (!bfa_ioc_is_operational(fru->ioc))
6541                return BFA_STATUS_IOC_NON_OP;
6542
6543        if (fru->op_busy) {
6544                bfa_trc(fru, fru->op_busy);
6545                return BFA_STATUS_DEVBUSY;
6546        }
6547
6548        fru->op_busy = 1;
6549
6550        fru->cbfn = cbfn;
6551        fru->cbarg = cbarg;
6552        fru->residue = len;
6553        fru->offset = 0;
6554        fru->addr_off = offset;
6555        fru->ubuf = buf;
6556        bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6557
6558        return BFA_STATUS_OK;
6559}
6560
6561/*
6562 * Process fru response messages upon receiving interrupts.
6563 *
6564 * @param[in] fruarg - fru structure
6565 * @param[in] msg - message structure
6566 */
6567void
6568bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6569{
6570        struct bfa_fru_s *fru = fruarg;
6571        struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6572        u32 status;
6573
6574        bfa_trc(fru, msg->mh.msg_id);
6575
6576        if (!fru->op_busy) {
6577                /*
6578                 * receiving response after ioc failure
6579                 */
6580                bfa_trc(fru, 0x9999);
6581                return;
6582        }
6583
6584        switch (msg->mh.msg_id) {
6585        case BFI_FRUVPD_I2H_WRITE_RSP:
6586        case BFI_TFRU_I2H_WRITE_RSP:
6587                status = be32_to_cpu(rsp->status);
6588                bfa_trc(fru, status);
6589
6590                if (status != BFA_STATUS_OK || fru->residue == 0) {
6591                        fru->status = status;
6592                        fru->op_busy = 0;
6593                        if (fru->cbfn)
6594                                fru->cbfn(fru->cbarg, fru->status);
6595                } else {
6596                        bfa_trc(fru, fru->offset);
6597                        if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6598                                bfa_fru_write_send(fru,
6599                                        BFI_FRUVPD_H2I_WRITE_REQ);
6600                        else
6601                                bfa_fru_write_send(fru,
6602                                        BFI_TFRU_H2I_WRITE_REQ);
6603                }
6604                break;
6605        case BFI_FRUVPD_I2H_READ_RSP:
6606        case BFI_TFRU_I2H_READ_RSP:
6607                status = be32_to_cpu(rsp->status);
6608                bfa_trc(fru, status);
6609
6610                if (status != BFA_STATUS_OK) {
6611                        fru->status = status;
6612                        fru->op_busy = 0;
6613                        if (fru->cbfn)
6614                                fru->cbfn(fru->cbarg, fru->status);
6615                } else {
6616                        u32 len = be32_to_cpu(rsp->length);
6617
6618                        bfa_trc(fru, fru->offset);
6619                        bfa_trc(fru, len);
6620
6621                        memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6622                        fru->residue -= len;
6623                        fru->offset += len;
6624
6625                        if (fru->residue == 0) {
6626                                fru->status = status;
6627                                fru->op_busy = 0;
6628                                if (fru->cbfn)
6629                                        fru->cbfn(fru->cbarg, fru->status);
6630                        } else {
6631                                if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6632                                        bfa_fru_read_send(fru,
6633                                                BFI_FRUVPD_H2I_READ_REQ);
6634                                else
6635                                        bfa_fru_read_send(fru,
6636                                                BFI_TFRU_H2I_READ_REQ);
6637                        }
6638                }
6639                break;
6640        default:
6641                WARN_ON(1);
6642        }
6643}
6644
6645/*
6646 * register definitions
6647 */
6648#define FLI_CMD_REG                     0x0001d000
6649#define FLI_RDDATA_REG                  0x0001d010
6650#define FLI_ADDR_REG                    0x0001d004
6651#define FLI_DEV_STATUS_REG              0x0001d014
6652
6653#define BFA_FLASH_FIFO_SIZE             128     /* fifo size */
6654#define BFA_FLASH_CHECK_MAX             10000   /* max # of status checks */
6655#define BFA_FLASH_BLOCKING_OP_MAX       1000000 /* max # of blocking op checks */
6656#define BFA_FLASH_WIP_MASK              0x01    /* write in progress bit mask */
6657
6658enum bfa_flash_cmd {
6659        BFA_FLASH_FAST_READ     = 0x0b, /* fast read */
6660        BFA_FLASH_READ_STATUS   = 0x05, /* read status */
6661};
6662
6663/**
6664 * @brief hardware error definition
6665 */
6666enum bfa_flash_err {
6667        BFA_FLASH_NOT_PRESENT   = -1,   /*!< flash not present */
6668        BFA_FLASH_UNINIT        = -2,   /*!< flash not initialized */
6669        BFA_FLASH_BAD           = -3,   /*!< flash bad */
6670        BFA_FLASH_BUSY          = -4,   /*!< flash busy */
6671        BFA_FLASH_ERR_CMD_ACT   = -5,   /*!< command active never cleared */
6672        BFA_FLASH_ERR_FIFO_CNT  = -6,   /*!< fifo count never cleared */
6673        BFA_FLASH_ERR_WIP       = -7,   /*!< write-in-progress never cleared */
6674        BFA_FLASH_ERR_TIMEOUT   = -8,   /*!< fli timeout */
6675        BFA_FLASH_ERR_LEN       = -9,   /*!< invalid length */
6676};
6677
6678/**
6679 * @brief flash command register data structure
6680 */
6681union bfa_flash_cmd_reg_u {
6682        struct {
6683#ifdef __BIG_ENDIAN
6684                u32     act:1;
6685                u32     rsv:1;
6686                u32     write_cnt:9;
6687                u32     read_cnt:9;
6688                u32     addr_cnt:4;
6689                u32     cmd:8;
6690#else
6691                u32     cmd:8;
6692                u32     addr_cnt:4;
6693                u32     read_cnt:9;
6694                u32     write_cnt:9;
6695                u32     rsv:1;
6696                u32     act:1;
6697#endif
6698        } r;
6699        u32     i;
6700};
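
/*
 * Editorial sketch, not part of the original driver: how the bitfields
 * above pack into one command word on a little-endian host, assuming a
 * 128-byte FAST_READ with three address bytes plus a dummy byte (the
 * helper name is hypothetical).
 */
#if 0	/* illustration only */
static u32
bfa_flash_example_cmd_word(void)
{
	union bfa_flash_cmd_reg_u cmd;

	cmd.i = 0;
	cmd.r.cmd = BFA_FLASH_FAST_READ;	/* opcode 0x0b, bits 0-7 */
	cmd.r.addr_cnt = 4;			/* address bytes, bits 8-11 */
	cmd.r.read_cnt = 128;			/* one full fifo, bits 12-20 */
	cmd.r.act = 1;				/* start bit, bit 31 */

	return cmd.i;				/* yields 0x8008040b */
}
#endif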
6701
6702/**
6703 * @brief flash device status register data structure
6704 */
6705union bfa_flash_dev_status_reg_u {
6706        struct {
6707#ifdef __BIG_ENDIAN
6708                u32     rsv:21;
6709                u32     fifo_cnt:6;
6710                u32     busy:1;
6711                u32     init_status:1;
6712                u32     present:1;
6713                u32     bad:1;
6714                u32     good:1;
6715#else
6716                u32     good:1;
6717                u32     bad:1;
6718                u32     present:1;
6719                u32     init_status:1;
6720                u32     busy:1;
6721                u32     fifo_cnt:6;
6722                u32     rsv:21;
6723#endif
6724        } r;
6725        u32     i;
6726};
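
/*
 * Editorial note, not part of the original driver: fifo_cnt counts
 * 32-bit words waiting in the read fifo. bfa_flash_fifo_flush() below
 * drains exactly that many words, and bfa_flash_status_read() treats a
 * zero count after a READ_STATUS command as "device busy".
 */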
6727
6728/**
6729 * @brief flash address register data structure
6730 */
6731union bfa_flash_addr_reg_u {
6732        struct {
6733#ifdef __BIG_ENDIAN
6734                u32     addr:24;
6735                u32     dummy:8;
6736#else
6737                u32     dummy:8;
6738                u32     addr:24;
6739#endif
6740        } r;
6741        u32     i;
6742};
6743
6744/**
6745 * @brief Flash raw private functions
6746 */
6747static void
6748bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
6749                  u8 rd_cnt, u8 ad_cnt, u8 op)
6750{
6751        union bfa_flash_cmd_reg_u cmd;
6752
6753        cmd.i = 0;
6754        cmd.r.act = 1;
6755        cmd.r.write_cnt = wr_cnt;
6756        cmd.r.read_cnt = rd_cnt;
6757        cmd.r.addr_cnt = ad_cnt;
6758        cmd.r.cmd = op;
6759        writel(cmd.i, (pci_bar + FLI_CMD_REG));
6760}
6761
6762static void
6763bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
6764{
6765        union bfa_flash_addr_reg_u addr;
6766
6767        addr.r.addr = address & 0x00ffffff;
6768        addr.r.dummy = 0;
6769        writel(addr.i, (pci_bar + FLI_ADDR_REG));
6770}
6771
6772static int
6773bfa_flash_cmd_act_check(void __iomem *pci_bar)
6774{
6775        union bfa_flash_cmd_reg_u cmd;
6776
6777        cmd.i = readl(pci_bar + FLI_CMD_REG);
6778
6779        if (cmd.r.act)
6780                return BFA_FLASH_ERR_CMD_ACT;
6781
6782        return 0;
6783}
6784
6785/**
6786 * @brief
6787 * Flush FLI data fifo.
6788 *
6789 * @param[in] pci_bar - pci bar address
6790 *
6791 *
6792 * Return 0 on success, negative error number on error.
6793 */
6794static int
6795bfa_flash_fifo_flush(void __iomem *pci_bar)
6796{
6797        u32 i;
6798        u32 t;
6799        union bfa_flash_dev_status_reg_u dev_status;
6800
6801        dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6802
6803        if (!dev_status.r.fifo_cnt)
6804                return 0;
6805
6806        /* drain the fifo; its counter is in units of 32-bit words */
6807        for (i = 0; i < dev_status.r.fifo_cnt; i++)
6808                t = readl(pci_bar + FLI_RDDATA_REG);
6809
6810        /*
6811         * Check the device status. It may take some time.
6812         */
6813        for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6814                dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6815                if (!dev_status.r.fifo_cnt)
6816                        break;
6817        }
6818
6819        if (dev_status.r.fifo_cnt)
6820                return BFA_FLASH_ERR_FIFO_CNT;
6821
6822        return 0;
6823}
6824
6825/**
6826 * @brief
6827 * Read the serial flash status register.
6828 *
6829 * @param[in] pci_bar - pci bar address
6830 *
6831 * Return the status byte (>= 0) on success, negative error number on error.
6832 */
6833static int
6834bfa_flash_status_read(void __iomem *pci_bar)
6835{
6836        union bfa_flash_dev_status_reg_u dev_status;
6837        int                             status;
6838        u32                             ret_status;
6839        int                             i;
6840
6841        status = bfa_flash_fifo_flush(pci_bar);
6842        if (status < 0)
6843                return status;
6844
6845        bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
6846
6847        for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6848                status = bfa_flash_cmd_act_check(pci_bar);
6849                if (!status)
6850                        break;
6851        }
6852
6853        if (status)
6854                return status;
6855
6856        dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6857        if (!dev_status.r.fifo_cnt)
6858                return BFA_FLASH_BUSY;
6859
6860        ret_status = readl(pci_bar + FLI_RDDATA_REG);
6861        ret_status >>= 24;
6862
6863        status = bfa_flash_fifo_flush(pci_bar);
6864        if (status < 0)
6865                return status;
6866
6867        return ret_status;
6868}
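
/*
 * Editorial sketch, not part of the original driver: on success the
 * function above returns the flash status byte itself, so callers test
 * individual bits rather than comparing against zero.
 */
#if 0	/* illustration only */
	int status = bfa_flash_status_read(pci_bar);

	if (status < 0)
		return status;			/* fifo or command error */
	if (status & BFA_FLASH_WIP_MASK)
		return BFA_FLASH_ERR_WIP;	/* program/erase still running */
#endif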
6869
6870/**
6871 * @brief
6872 * Start flash read operation.
6873 *
6874 * @param[in] pci_bar - pci bar address
6875 * @param[in] offset - flash address offset
6876 * @param[in] len - read data length
6877 * @param[out] buf - read data buffer
6878 *
6879 * Return 0 on success, negative error number on error.
6880 */
6881static int
6882bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
6883                         char *buf)
6884{
6885        int status;
6886
6887        /*
6888         * len must be a multiple of 4 and must not exceed the fifo size
6889         */
6890        if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
6891                return BFA_FLASH_ERR_LEN;
6892
6893        /*
6894         * check status; retry once if the device reports busy
6895         */
6896        status = bfa_flash_status_read(pci_bar);
6897        if (status == BFA_FLASH_BUSY)
6898                status = bfa_flash_status_read(pci_bar);
6899
6900        if (status < 0)
6901                return status;
6902
6903        /*
6904         * check if write-in-progress bit is cleared
6905         */
6906        if (status & BFA_FLASH_WIP_MASK)
6907                return BFA_FLASH_ERR_WIP;
6908
6909        bfa_flash_set_addr(pci_bar, offset);
6910
6911        bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
6912
6913        return 0;
6914}
6915
6916/**
6917 * @brief
6918 * Check flash read operation.
6919 *
6920 * @param[in] pci_bar - pci bar address
6921 *
6922 * Return 1 if the read command is still active, 0 when it has completed.
6923 */
6924static u32
6925bfa_flash_read_check(void __iomem *pci_bar)
6926{
6927        if (bfa_flash_cmd_act_check(pci_bar))
6928                return 1;
6929
6930        return 0;
6931}
6932/**
6933 * @brief
6934 * End flash read operation.
6935 *
6936 * @param[in] pci_bar - pci bar address
6937 * @param[in] len - read data length
6938 * @param[out] buf - read data buffer
6939 *
6940 */
6941static void
6942bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
6943{
6944
6945        u32 i;
6946
6947        /*
6948         * read the data fifo, up to 32 words; swab32 puts each word in buffer order
6949         */
6950        for (i = 0; i < len; i += 4) {
6951                u32 w = readl(pci_bar + FLI_RDDATA_REG);
6952                *((u32 *) (buf + i)) = swab32(w);
6953        }
6954
6955        bfa_flash_fifo_flush(pci_bar);
6956}
6957
6958/**
6959 * @brief
6960 * Perform a raw flash read; implemented by bfa_flash_raw_read() below.
6961 *
6962 * @param[in] pci_bar - pci bar address
6963 * @param[in] offset - flash partition address offset
6964 * @param[out] buf - read data buffer
6965 * @param[in] len - read data length
6966 *
6967 * Return BFA_STATUS_OK on success; BFA_STATUS_BADFLASH if the semaphore cannot be taken, BFA_STATUS_FAILED on a read error.
6968 */
6969
6970
6971#define FLASH_BLOCKING_OP_MAX   500     /* 10ms polls: ~5 sec max wait */
6972#define FLASH_SEM_LOCK_REG      0x18820 /* hw semaphore guarding flash access */
6973
6974static int
6975bfa_raw_sem_get(void __iomem *bar)
6976{
6977        int     locked;
6978
6979        locked = readl((bar + FLASH_SEM_LOCK_REG)); /* reading claims the lock */
6980        return !locked;         /* 0 read back means we now own it */
6981
6982}
6983
6984bfa_status_t
6985bfa_flash_sem_get(void __iomem *bar)
6986{
6987        u32 n = FLASH_BLOCKING_OP_MAX;
6988
6989        while (!bfa_raw_sem_get(bar)) {
6990                if (--n <= 0)
6991                        return BFA_STATUS_BADFLASH;
6992                mdelay(10);
6993        }
6994        return BFA_STATUS_OK;
6995}
6996
6997void
6998bfa_flash_sem_put(void __iomem *bar)
6999{
7000        writel(0, (bar + FLASH_SEM_LOCK_REG));
7001}
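
/*
 * Editorial sketch, not part of the original driver: raw flash accesses
 * are expected to be bracketed by the semaphore pair, exactly as
 * bfa_flash_raw_read() does below.
 */
#if 0	/* illustration only */
	if (bfa_flash_sem_get(pci_bar) != BFA_STATUS_OK)
		return BFA_STATUS_BADFLASH;	/* lock never freed up */
	/* ... FLI command, address and fifo accesses ... */
	bfa_flash_sem_put(pci_bar);
#endif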
7002
7003bfa_status_t
7004bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
7005                       u32 len)
7006{
7007        u32 n;
7008        int status;
7009        u32 off, l, s, residue, fifo_sz;
7010
7011        residue = len;
7012        off = 0;
7013        fifo_sz = BFA_FLASH_FIFO_SIZE;
7014        status = bfa_flash_sem_get(pci_bar);
7015        if (status != BFA_STATUS_OK)
7016                return status;
7017
7018        while (residue) {
7019                s = offset + off;       /* absolute flash address */
7020                n = s / fifo_sz;        /* fifo window index */
7021                l = (n + 1) * fifo_sz - s; /* bytes to the next fifo boundary */
7022                if (l > residue)
7023                        l = residue;
7024
7025                status = bfa_flash_read_start(pci_bar, offset + off, l,
7026                                                                &buf[off]);
7027                if (status < 0) {
7028                        bfa_flash_sem_put(pci_bar);
7029                        return BFA_STATUS_FAILED;
7030                }
7031
7032                n = BFA_FLASH_BLOCKING_OP_MAX;
7033                while (bfa_flash_read_check(pci_bar)) {
7034                        if (--n <= 0) {
7035                                bfa_flash_sem_put(pci_bar);
7036                                return BFA_STATUS_FAILED;
7037                        }
7038                }
7039
7040                bfa_flash_read_end(pci_bar, l, &buf[off]);
7041
7042                residue -= l;
7043                off += l;
7044        }
7045        bfa_flash_sem_put(pci_bar);
7046
7047        return BFA_STATUS_OK;
7048}
7049
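/*
 * Editorial sketch, not part of the original source: the loop above
 * aligns every transfer to the next 128-byte fifo boundary, so only the
 * first chunk can start unaligned. For offset=100 and len=300 the chunk
 * lengths come out as 28, 128, 128 and 16 bytes:
 */
#if 0	/* illustration only */
	u32 off = 0, residue = 300, offset = 100, fifo_sz = BFA_FLASH_FIFO_SIZE;

	while (residue) {
		u32 s = offset + off;				/* absolute address */
		u32 l = (s / fifo_sz + 1) * fifo_sz - s;	/* to next boundary */

		if (l > residue)
			l = residue;
		/* l takes the values 28, 128, 128, 16 */
		residue -= l;
		off += l;
	}
#endif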