linux/drivers/scsi/bfa/bfa_ioc.c
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV             3000    /* msecs */
#define BFA_IOC_HWSEM_TOV       500     /* msecs */
#define BFA_IOC_HB_TOV          500     /* msecs */
#define BFA_IOC_TOV_RECOVER      BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV        BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)                                      \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)                                       \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,         \
                        bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)        bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)  (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
                        ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                 \
                        ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)            \
                        ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)           \
                        ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)           \
                ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)         \
                        (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
                        readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
                                enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
        IOC_E_RESET             = 1,    /*  IOC reset request           */
        IOC_E_ENABLE            = 2,    /*  IOC enable request          */
        IOC_E_DISABLE           = 3,    /*  IOC disable request */
        IOC_E_DETACH            = 4,    /*  driver detach cleanup       */
        IOC_E_ENABLED           = 5,    /*  f/w enabled         */
        IOC_E_FWRSP_GETATTR     = 6,    /*  IOC get attribute response  */
        IOC_E_DISABLED          = 7,    /*  f/w disabled                */
        IOC_E_PFFAILED          = 8,    /*  failure notice by iocpf sm  */
        IOC_E_HBFAIL            = 9,    /*  heartbeat failure           */
        IOC_E_HWERROR           = 10,   /*  hardware error interrupt    */
        IOC_E_TIMEOUT           = 11,   /*  timeout                     */
        IOC_E_HWFAILED          = 12,   /*  PCI mapping failure notice  */
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
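
/*
 * Editor's note -- a paraphrase of the FSM helpers from bfa_cs.h (see
 * that header for the authoritative definitions): bfa_fsm_state_decl()
 * emits prototypes for a per-state event handler and entry action,
 * e.g. for the reset state above:
 *
 *      static void bfa_ioc_sm_reset(struct bfa_ioc_s *ioc,
 *                              enum ioc_event event);
 *      static void bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc);
 *
 * bfa_fsm_set_state() stores the handler pointer in the object and runs
 * the new state's entry action; bfa_fsm_send_event() dispatches an
 * event to whatever handler is currently stored.
 */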

static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
        {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
        {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
        {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
        {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
        {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
        {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
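
/*
 * The table maps each state handler back to its externally visible
 * enum. State queries use a linear lookup along these lines
 * (bfa_sm_to_state() in bfa_cs.h; paraphrased sketch):
 *
 *      int i = 0;
 *      while (smt[i].sm && smt[i].sm != sm)
 *              i++;
 *      return smt[i].state;
 */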

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)                                    \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)     bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)                               \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)                                      \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,        \
                        bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)       bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for the iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
        IOCPF_E_ENABLE          = 1,    /*  IOCPF enable request        */
        IOCPF_E_DISABLE         = 2,    /*  IOCPF disable request       */
        IOCPF_E_STOP            = 3,    /*  stop on driver detach       */
        IOCPF_E_FWREADY         = 4,    /*  f/w initialization done     */
        IOCPF_E_FWRSP_ENABLE    = 5,    /*  enable f/w response */
        IOCPF_E_FWRSP_DISABLE   = 6,    /*  disable f/w response        */
        IOCPF_E_FAIL            = 7,    /*  failure notice by ioc sm    */
        IOCPF_E_INITFAIL        = 8,    /*  init fail notice by ioc sm  */
        IOCPF_E_GETATTRFAIL     = 9,    /*  getattr fail notice by ioc sm */
        IOCPF_E_SEMLOCKED       = 10,   /*  h/w semaphore is locked     */
        IOCPF_E_TIMEOUT         = 11,   /*  f/w response timeout        */
        IOCPF_E_SEM_ERROR       = 12,   /*  h/w sem mapping error       */
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
        BFA_IOCPF_RESET         = 1,    /*  IOC is in reset state */
        BFA_IOCPF_SEMWAIT       = 2,    /*  Waiting for IOC h/w semaphore */
        BFA_IOCPF_HWINIT        = 3,    /*  IOC h/w is being initialized */
        BFA_IOCPF_READY         = 4,    /*  IOCPF is initialized */
        BFA_IOCPF_INITFAIL      = 5,    /*  IOCPF init failed */
        BFA_IOCPF_FAIL          = 6,    /*  IOCPF failed */
        BFA_IOCPF_DISABLING     = 7,    /*  IOCPF is being disabled */
        BFA_IOCPF_DISABLED      = 8,    /*  IOCPF is disabled */
        BFA_IOCPF_FWMISMATCH    = 9,    /*  IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
        {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
        {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
        {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
        {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_RESET:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
        bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_disable_comp(ioc);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_timer_start(ioc);
        bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_timer_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
        bfa_ioc_hb_monitor(ioc);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_hb_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_hb_timer_stop(ioc);
                /* !!! fall through !!! */
        case IOC_E_HBFAIL:
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

                bfa_ioc_fail_notify(ioc);

                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_DISABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_HWERROR:
                /*
                 * No state change.  Will move to disabled state
                 * after iocpf sm completes failure processing and
                 * moves to disabled state.
                 */
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;

        case IOC_E_HWFAILED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                bfa_ioc_disable_comp(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                /*
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {

        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        case IOC_E_HWERROR:
        case IOC_E_HWFAILED:
                /*
                 * HB failure / HW error notification, ignore.
                 */
                break;
        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        case IOC_E_HWERROR:
                /* Ignore - already in hwfail state */
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
        iocpf->fw_mismatch_notified = BFA_FALSE;
        iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_STOP:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
        struct bfi_ioc_image_hdr_s      fwhdr;
        u32     r32, fwstate, pgnum, pgoff, loff = 0;
        int     i;

        /*
         * Spin on init semaphore to serialize.
         */
        r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
        while (r32 & 0x1) {
                udelay(20);
                r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
        }

        /* h/w sem init */
        fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
        if (fwstate == BFI_IOC_UNINIT) {
                writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
                goto sem_get;
        }

        bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

        if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
                writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
                goto sem_get;
        }

        /*
         * Clear fwver hdr
         */
        pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
        pgoff = PSS_SMEM_PGOFF(loff);
        writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
                bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
                loff += sizeof(u32);
        }

        bfa_trc(iocpf->ioc, fwstate);
        bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
        bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
        bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);

        /*
         * Unlock the hw semaphore. Should be here only once per boot.
         */
        bfa_ioc_ownership_reset(iocpf->ioc);

        /*
         * unlock init semaphore.
         */
        writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
                                bfa_ioc_firmware_unlock(ioc);
                                writel(1, ioc->ioc_regs.ioc_sem_reg);
                                bfa_sem_timer_start(ioc);
                        }
                } else {
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_fsm_send_event(ioc, IOC_E_DISABLED);
                break;

        case IOCPF_E_STOP:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
        /*
         * Call only the first time sm enters fwmismatch state.
         */
        if (iocpf->fw_mismatch_notified == BFA_FALSE)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);

        iocpf->fw_mismatch_notified = BFA_TRUE;
        bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_TIMEOUT:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_fsm_send_event(ioc, IOC_E_DISABLED);
                break;

        case IOCPF_E_STOP:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_sync_complete(ioc)) {
                        bfa_ioc_sync_join(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                } else {
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_sem_timer_start(ioc);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
        iocpf->poll_time = 0;
        bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWREADY:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
                break;

        case IOCPF_E_TIMEOUT:
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_ioc_sync_leave(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_iocpf_timer_start(iocpf->ioc);
        /*
         * Enable Interrupts before sending fw IOC ENABLE cmd.
         */
        iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWRSP_ENABLE:
                bfa_iocpf_timer_stop(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
                break;

        case IOCPF_E_INITFAIL:
                bfa_iocpf_timer_stop(ioc);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                if (event == IOCPF_E_TIMEOUT)
                        bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        case IOCPF_E_GETATTRFAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_iocpf_timer_start(iocpf->ioc);
        bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_iocpf_timer_stop(ioc);
                /*
                 * !!! fall through !!!
                 */

        case IOCPF_E_TIMEOUT:
                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FWRSP_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_leave(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_mbox_flush(iocpf->ioc);
        bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_notify_fail(ioc);
                bfa_ioc_sync_leave(ioc);
                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_STOP:
                bfa_sem_timer_stop(ioc);
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
        /*
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(iocpf->ioc);

        /*
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_flush(iocpf->ioc);

        bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        bfa_ioc_sync_leave(ioc);
                        bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
                        if (bfa_ioc_sync_complete(ioc))
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        else {
                                writel(1, ioc->ioc_regs.ioc_sem_reg);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                        }
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 *  BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
        struct bfa_ioc_notify_s *notify;
        struct list_head        *qe;

        list_for_each(qe, &ioc->notify_q) {
                notify = (struct bfa_ioc_notify_s *)qe;
                notify->cbfn(notify->cbarg, event);
        }
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
        ioc->cbfn->disable_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
        u32 r32;
        int cnt = 0;
#define BFA_SEM_SPINCNT 3000

        r32 = readl(sem_reg);

        while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
                udelay(2);
                r32 = readl(sem_reg);
        }

        if (!(r32 & 1))
                return BFA_TRUE;

        return BFA_FALSE;
}
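
/*
 * Usage sketch (illustrative): callers of bfa_ioc_sem_get() pair it
 * with a release write of 1 to the same register, e.g. the ASIC
 * firmware-lock paths do roughly:
 *
 *      if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg)) {
 *              ... touch the shared usage count ...
 *              writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
 *      }
 */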

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
        u32     r32;

        /*
         * First read to the semaphore register will return 0, subsequent reads
         * will return 1. Semaphore is released by writing 1 to the register
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == ~0) {
                WARN_ON(r32 == ~0);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
                return;
        }
        if (!(r32 & 1)) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }

        bfa_sem_timer_start(ioc);
}
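
/*
 * Release is the mirror image of the acquire above: writing 1 to
 * ioc_sem_reg frees the semaphore, which is why every state-machine
 * exit path that holds the lock does
 * "writel(1, ioc->ioc_regs.ioc_sem_reg)".
 */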

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;
        int             i;
#define PSS_LMEM_INIT_TIME  10000

        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LMEM_RESET;
        pss_ctl |= __PSS_LMEM_INIT_EN;

        /*
         * i2c workaround 12.5khz clock
         */
        pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

        /*
         * wait for memory initialization to be complete
         */
        i = 0;
        do {
                pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
                i++;
        } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

        /*
         * If memory initialization is not successful, IOC timeout will catch
         * such failures.
         */
        WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
        bfa_trc(ioc, pss_ctl);

        pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;

        /*
         * Take processor out of reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LPU0_RESET;

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;

        /*
         * Put processors in reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
        u32     pgnum, pgoff;
        u32     loff = 0;
        int             i;
        u32     *fwsig = (u32 *) fwhdr;

        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
        pgoff = PSS_SMEM_PGOFF(loff);
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
             i++) {
                fwsig[i] =
                        bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
                loff += sizeof(u32);
        }
}

/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
        struct bfi_ioc_image_hdr_s *drv_fwhdr;
        int i;

        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
                bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
                if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
                        bfa_trc(ioc, i);
                        bfa_trc(ioc, fwhdr->md5sum[i]);
                        bfa_trc(ioc, drv_fwhdr->md5sum[i]);
                        return BFA_FALSE;
                }
        }

        bfa_trc(ioc, fwhdr->md5sum[0]);
        return BFA_TRUE;
}
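
/*
 * Usage sketch: the two helpers above combine into the version check
 * performed at fwcheck time, roughly:
 *
 *      struct bfi_ioc_image_hdr_s hdr;
 *
 *      bfa_ioc_fwver_get(ioc, &hdr);
 *      if (!bfa_ioc_fwver_cmp(ioc, &hdr))
 *              ... running firmware does not match the driver image ...
 */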

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
        struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

        bfa_ioc_fwver_get(ioc, &fwhdr);
        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
                bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

        if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
                bfa_trc(ioc, fwhdr.signature);
                bfa_trc(ioc, drv_fwhdr->signature);
                return BFA_FALSE;
        }

        if (swab32(fwhdr.bootenv) != boot_env) {
                bfa_trc(ioc, fwhdr.bootenv);
                bfa_trc(ioc, boot_env);
                return BFA_FALSE;
        }

        return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
        u32     r32;

        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
        if (r32)
                writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
        enum bfi_ioc_state ioc_fwstate;
        bfa_boolean_t fwvalid;
        u32 boot_type;
        u32 boot_env;

        ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;

        bfa_trc(ioc, ioc_fwstate);

        boot_type = BFI_FWBOOT_TYPE_NORMAL;
        boot_env = BFI_FWBOOT_ENV_OS;

        /*
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
                BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

        if (!fwvalid) {
                bfa_ioc_boot(ioc, boot_type, boot_env);
                bfa_ioc_poll_fwinit(ioc);
                return;
        }

        /*
         * If hardware initialization is in progress (initialized by other IOC),
         * just wait for an initialization completion interrupt.
         */
        if (ioc_fwstate == BFI_IOC_INITING) {
                bfa_ioc_poll_fwinit(ioc);
                return;
        }

        /*
         * If IOC function is disabled and firmware version is same,
         * just re-enable IOC.
         *
         * If option rom, IOC must not be in operational state. With
         * convergence, IOC will be in operational state when 2nd driver
         * is loaded.
         */
        if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

                /*
                 * When using MSI-X any pending firmware ready event should
                 * be flushed. Otherwise MSI-X interrupts are not delivered.
                 */
                bfa_ioc_msgflush(ioc);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }

        /*
         * Initialize the h/w for any other states.
         */
        bfa_ioc_boot(ioc, boot_type, boot_env);
        bfa_ioc_poll_fwinit(ioc);
}
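
/*
 * Summary of the decision tree above: UNINIT or an invalid image means
 * boot and poll; INITING means another function is already booting, so
 * just poll; DISABLED or OP with a matching image means flush any stale
 * ready event and replay IOCPF_E_FWREADY; any other state falls back to
 * a fresh boot.
 */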

static void
bfa_ioc_timeout(void *ioc_arg)
{
        struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

        bfa_trc(ioc, 0);
        bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
        u32 *msgp = (u32 *) ioc_msg;
        u32 i;

        bfa_trc(ioc, msgp[0]);
        bfa_trc(ioc, len);

        WARN_ON(len > BFI_IOC_MSGLEN_MAX);

        /*
         * first write msg to mailbox registers
         */
        for (i = 0; i < len / sizeof(u32); i++)
                writel(cpu_to_le32(msgp[i]),
                        ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
                writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

        /*
         * write 1 to mailbox CMD to trigger LPU event
         */
        writel(1, ioc->ioc_regs.hfn_mbox_cmd);
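        /*
         * read back to flush the posted PCI write of the doorbell
         */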
        (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_ctrl_req_s enable_req;
        struct timeval tv;

        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
        enable_req.clscode = cpu_to_be16(ioc->clscode);
        do_gettimeofday(&tv);
        enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_ctrl_req_s disable_req;

        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
                    bfa_ioc_portid(ioc));
        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_getattr_req_s    attr_req;

        bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
                    bfa_ioc_portid(ioc));
        bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
        bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

static void
bfa_ioc_hb_check(void *cbarg)
{
        struct bfa_ioc_s  *ioc = cbarg;
        u32     hb_count;

        hb_count = readl(ioc->ioc_regs.heartbeat);
        if (ioc->hb_count == hb_count) {
                bfa_ioc_recover(ioc);
                return;
        } else {
                ioc->hb_count = hb_count;
        }

        bfa_ioc_mbox_poll(ioc);
        bfa_hb_timer_start(ioc);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
        ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
        bfa_hb_timer_start(ioc);
}
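
/*
 * Note: bfa_ioc_hb_check() re-arms itself every BFA_IOC_HB_TOV msecs,
 * so the heartbeat timer doubles as the mailbox poll tick via
 * bfa_ioc_mbox_poll().
 */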
1683
1684/*
1685 *      Initiate a full firmware download.
1686 */
1687static void
1688bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1689                    u32 boot_env)
1690{
1691        u32 *fwimg;
1692        u32 pgnum, pgoff;
1693        u32 loff = 0;
1694        u32 chunkno = 0;
1695        u32 i;
1696        u32 asicmode;
1697
1698        bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1699        fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1700
1701        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1702        pgoff = PSS_SMEM_PGOFF(loff);
1703
1704        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1705
1706        for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
1707
1708                if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1709                        chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1710                        fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1711                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1712                }
1713
1714                /*
1715                 * write smem
1716                 */
1717                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1718                        cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
1719
1720                loff += sizeof(u32);
1721
1722                /*
1723                 * handle page offset wrap around
1724                 */
1725                loff = PSS_SMEM_PGOFF(loff);
1726                if (loff == 0) {
1727                        pgnum++;
1728                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1729                }
1730        }
1731
1732        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1733                        ioc->ioc_regs.host_page_num_fn);
1734
1735        /*
1736         * Set boot type and device mode at the end.
1737         */
1738        asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1739                                ioc->port0_mode, ioc->port1_mode);
1740        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1741                        swab32(asicmode));
1742        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1743                        swab32(boot_type));
1744        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1745                        swab32(boot_env));
1746}
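
/*
 * Editor's sketch of the addressing used by the download loop above
 * (assuming the conventional definitions of the chunk macros): word i
 * of the image lives in chunk BFA_IOC_FLASH_CHUNK_NO(i) at index
 * BFA_IOC_FLASH_OFFSET_IN_CHUNK(i), so a fresh chunk is fetched only
 * when i crosses a chunk boundary.  Independently, loff/pgnum track the
 * smem page window: whenever PSS_SMEM_PGOFF(loff) wraps to 0, the next
 * page is selected through host_page_num_fn.
 */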
1747
1748
1749/*
1750 * Update BFA configuration from firmware configuration.
1751 */
1752static void
1753bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1754{
1755        struct bfi_ioc_attr_s   *attr = ioc->attr;
1756
1757        attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1758        attr->card_type     = be32_to_cpu(attr->card_type);
1759        attr->maxfrsize     = be16_to_cpu(attr->maxfrsize);
1760        ioc->fcmode     = (attr->port_mode == BFI_PORT_MODE_FC);
1761        attr->mfg_year  = be16_to_cpu(attr->mfg_year);
1762
1763        bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1764}
1765
1766/*
1767 * Attach time initialization of mbox logic.
1768 */
1769static void
1770bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1771{
1772        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1773        int     mc;
1774
1775        INIT_LIST_HEAD(&mod->cmd_q);
1776        for (mc = 0; mc < BFI_MC_MAX; mc++) {
1777                mod->mbhdlr[mc].cbfn = NULL;
1778                mod->mbhdlr[mc].cbarg = ioc->bfa;
1779        }
1780}
1781
1782/*
1783 * Mbox poll timer -- restarts any pending mailbox requests.
1784 */
1785static void
1786bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1787{
1788        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1789        struct bfa_mbox_cmd_s           *cmd;
1790        u32                     stat;
1791
1792        /*
1793         * If no command pending, do nothing
1794         */
1795        if (list_empty(&mod->cmd_q))
1796                return;
1797
1798        /*
1799         * If previous command is not yet fetched by firmware, do nothing
1800         */
1801        stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1802        if (stat)
1803                return;
1804
1805        /*
1806         * Dequeue the pending command and send it to firmware.
1807         */
1808        bfa_q_deq(&mod->cmd_q, &cmd);
1809        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1810}
1811
1812/*
1813 * Cleanup any pending requests.
1814 */
1815static void
1816bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
1817{
1818        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1819        struct bfa_mbox_cmd_s           *cmd;
1820
1821        while (!list_empty(&mod->cmd_q))
1822                bfa_q_deq(&mod->cmd_q, &cmd);
1823}
1824
1825/*
1826 * Read data from SMEM to host through PCI memmap
1827 *
1828 * @param[in]   ioc     memory for IOC
1829 * @param[in]   tbuf    app memory to store data from smem
1830 * @param[in]   soff    smem offset
1831 * @param[in]   sz      number of bytes to read from smem
1832 */
1833static bfa_status_t
1834bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1835{
1836        u32 pgnum, loff;
1837        __be32 r32;
1838        int i, len;
1839        u32 *buf = tbuf;
1840
1841        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1842        loff = PSS_SMEM_PGOFF(soff);
1843        bfa_trc(ioc, pgnum);
1844        bfa_trc(ioc, loff);
1845        bfa_trc(ioc, sz);
1846
1847        /*
1848         *  Hold semaphore to serialize pll init and fwtrc.
1849         */
1850        if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1851                bfa_trc(ioc, 0);
1852                return BFA_STATUS_FAILED;
1853        }
1854
1855        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1856
1857        len = sz/sizeof(u32);
1858        bfa_trc(ioc, len);
1859        for (i = 0; i < len; i++) {
1860                r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1861                buf[i] = swab32(r32);
1862                loff += sizeof(u32);
1863
1864                /*
1865                 * handle page offset wrap around
1866                 */
1867                loff = PSS_SMEM_PGOFF(loff);
1868                if (loff == 0) {
1869                        pgnum++;
1870                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1871                }
1872        }
1873        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1874                        ioc->ioc_regs.host_page_num_fn);
1875        /*
1876         *  release semaphore.
1877         */
1878        readl(ioc->ioc_regs.ioc_init_sem_reg);
1879        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1880
1881        bfa_trc(ioc, pgnum);
1882        return BFA_STATUS_OK;
1883}
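
/*
 * Illustrative use (editor's addition): bfa_ioc_debug_fwtrc() below is
 * the canonical caller.  A hypothetical raw read of the first 256 bytes
 * of smem would look like:
 *
 *	u32 buf[64];
 *
 *	if (bfa_ioc_smem_read(ioc, buf, 0, sizeof(buf)) != BFA_STATUS_OK)
 *		return;		(init semaphore was held elsewhere)
 *
 * Each word is byte-swapped via swab32() on its way out of smem.
 */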
1884
1885/*
1886 * Clear SMEM data from host through PCI memmap
1887 *
1888 * @param[in]   ioc     memory for IOC
1889 * @param[in]   soff    smem offset
1890 * @param[in]   sz      number of bytes to clear
1891 */
1892static bfa_status_t
1893bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1894{
1895        int i, len;
1896        u32 pgnum, loff;
1897
1898        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1899        loff = PSS_SMEM_PGOFF(soff);
1900        bfa_trc(ioc, pgnum);
1901        bfa_trc(ioc, loff);
1902        bfa_trc(ioc, sz);
1903
1904        /*
1905         *  Hold semaphore to serialize pll init and fwtrc.
1906         */
1907        if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1908                bfa_trc(ioc, 0);
1909                return BFA_STATUS_FAILED;
1910        }
1911
1912        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1913
1914        len = sz/sizeof(u32); /* len in words */
1915        bfa_trc(ioc, len);
1916        for (i = 0; i < len; i++) {
1917                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1918                loff += sizeof(u32);
1919
1920                /*
1921                 * handle page offset wrap around
1922                 */
1923                loff = PSS_SMEM_PGOFF(loff);
1924                if (loff == 0) {
1925                        pgnum++;
1926                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1927                }
1928        }
1929        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1930                        ioc->ioc_regs.host_page_num_fn);
1931
1932        /*
1933         *  release semaphore.
1934         */
1935        readl(ioc->ioc_regs.ioc_init_sem_reg);
1936        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1937        bfa_trc(ioc, pgnum);
1938        return BFA_STATUS_OK;
1939}
1940
1941static void
1942bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1943{
1944        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1945
1946        /*
1947         * Notify driver and common modules registered for notification.
1948         */
1949        ioc->cbfn->hbfail_cbfn(ioc->bfa);
1950        bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
1951
1952        bfa_ioc_debug_save_ftrc(ioc);
1953
1954        BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1955                "Heart Beat of IOC has failed\n");
1956        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
1957
1958}
1959
1960static void
1961bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1962{
1963        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1964        /*
1965         * Provide enable completion callback.
1966         */
1967        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1968        BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
1969                "Running firmware version is incompatible "
1970                "with the driver version\n");
1971        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
1972}
1973
1974bfa_status_t
1975bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1976{
1977
1978        /*
1979         *  Hold semaphore so that nobody can access the chip during init.
1980         */
1981        bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1982
1983        bfa_ioc_pll_init_asic(ioc);
1984
1985        ioc->pllinit = BFA_TRUE;
1986
1987        /*
1988         * Initialize LMEM
1989         */
1990        bfa_ioc_lmem_init(ioc);
1991
1992        /*
1993         *  release semaphore.
1994         */
1995        readl(ioc->ioc_regs.ioc_init_sem_reg);
1996        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1997
1998        return BFA_STATUS_OK;
1999}
2000
2001/*
2002 * Boot the firmware. The diag module also uses this interface to boot
2003 * with memory test as the entry vector.
2004 */
2005void
2006bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2007{
2008        bfa_ioc_stats(ioc, ioc_boots);
2009
2010        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2011                return;
2012
2013        /*
2014         * Initialize IOC state of all functions on a chip reset.
2015         */
2016        if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2017                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2018                bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2019        } else {
2020                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2021                bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2022        }
2023
2024        bfa_ioc_msgflush(ioc);
2025        bfa_ioc_download_fw(ioc, boot_type, boot_env);
2026        bfa_ioc_lpu_start(ioc);
2027}
2028
2029/*
2030 * Enable/disable IOC failure auto recovery.
2031 */
2032void
2033bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2034{
2035        bfa_auto_recover = auto_recover;
2036}
2037
2038
2039
2040bfa_boolean_t
2041bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2042{
2043        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2044}
2045
2046bfa_boolean_t
2047bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2048{
2049        u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
2050
2051        return ((r32 != BFI_IOC_UNINIT) &&
2052                (r32 != BFI_IOC_INITING) &&
2053                (r32 != BFI_IOC_MEMTEST));
2054}
2055
2056bfa_boolean_t
2057bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2058{
2059        __be32  *msgp = mbmsg;
2060        u32     r32;
2061        int             i;
2062
2063        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2064        if ((r32 & 1) == 0)
2065                return BFA_FALSE;
2066
2067        /*
2068         * read the MBOX msg
2069         */
2070        for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2071             i++) {
2072                r32 = readl(ioc->ioc_regs.lpu_mbox +
2073                                   i * sizeof(u32));
2074                msgp[i] = cpu_to_be32(r32);
2075        }
2076
2077        /*
2078         * turn off mailbox interrupt by clearing mailbox status
2079         */
2080        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2081        readl(ioc->ioc_regs.lpu_mbox_cmd);
2082
2083        return BFA_TRUE;
2084}
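
/*
 * Editor's note: this is the inbound mirror of bfa_ioc_mbox_send().
 * Bit 0 of lpu_mbox_cmd is the firmware's "message valid" flag.  On
 * little-endian hosts the cpu_to_be32() undoes the swap done by
 * readl(), so the message bytes land in memory exactly as firmware
 * wrote them (wire order), and the final write of 1 both acks the
 * message and clears the mailbox interrupt.
 */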
2085
2086void
2087bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2088{
2089        union bfi_ioc_i2h_msg_u *msg;
2090        struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2091
2092        msg = (union bfi_ioc_i2h_msg_u *) m;
2093
2094        bfa_ioc_stats(ioc, ioc_isrs);
2095
2096        switch (msg->mh.msg_id) {
2097        case BFI_IOC_I2H_HBEAT:
2098                break;
2099
2100        case BFI_IOC_I2H_ENABLE_REPLY:
2101                ioc->port_mode = ioc->port_mode_cfg =
2102                                (enum bfa_mode_s)msg->fw_event.port_mode;
2103                ioc->ad_cap_bm = msg->fw_event.cap_bm;
2104                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2105                break;
2106
2107        case BFI_IOC_I2H_DISABLE_REPLY:
2108                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2109                break;
2110
2111        case BFI_IOC_I2H_GETATTR_REPLY:
2112                bfa_ioc_getattr_reply(ioc);
2113                break;
2114
2115        default:
2116                bfa_trc(ioc, msg->mh.msg_id);
2117                WARN_ON(1);
2118        }
2119}
2120
2121/*
2122 * IOC attach time initialization and setup.
2123 *
2124 * @param[in]   ioc     memory for IOC
2125 * @param[in]   bfa     driver instance structure
2126 */
2127void
2128bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2129               struct bfa_timer_mod_s *timer_mod)
2130{
2131        ioc->bfa        = bfa;
2132        ioc->cbfn       = cbfn;
2133        ioc->timer_mod  = timer_mod;
2134        ioc->fcmode     = BFA_FALSE;
2135        ioc->pllinit    = BFA_FALSE;
2136        ioc->dbg_fwsave_once = BFA_TRUE;
2137        ioc->iocpf.ioc  = ioc;
2138
2139        bfa_ioc_mbox_attach(ioc);
2140        INIT_LIST_HEAD(&ioc->notify_q);
2141
2142        bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2143        bfa_fsm_send_event(ioc, IOC_E_RESET);
2144}
2145
2146/*
2147 * Driver detach time IOC cleanup.
2148 */
2149void
2150bfa_ioc_detach(struct bfa_ioc_s *ioc)
2151{
2152        bfa_fsm_send_event(ioc, IOC_E_DETACH);
2153        INIT_LIST_HEAD(&ioc->notify_q);
2154}
2155
2156/*
2157 * Setup IOC PCI properties.
2158 *
2159 * @param[in]   pcidev  PCI device information for this IOC
2160 */
2161void
2162bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2163                enum bfi_pcifn_class clscode)
2164{
2165        ioc->clscode    = clscode;
2166        ioc->pcidev     = *pcidev;
2167
2168        /*
2169         * Initialize IOC and device personality
2170         */
2171        ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2172        ioc->asic_mode  = BFI_ASIC_MODE_FC;
2173
2174        switch (pcidev->device_id) {
2175        case BFA_PCI_DEVICE_ID_FC_8G1P:
2176        case BFA_PCI_DEVICE_ID_FC_8G2P:
2177                ioc->asic_gen = BFI_ASIC_GEN_CB;
2178                ioc->fcmode = BFA_TRUE;
2179                ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2180                ioc->ad_cap_bm = BFA_CM_HBA;
2181                break;
2182
2183        case BFA_PCI_DEVICE_ID_CT:
2184                ioc->asic_gen = BFI_ASIC_GEN_CT;
2185                ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2186                ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2187                ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2188                ioc->ad_cap_bm = BFA_CM_CNA;
2189                break;
2190
2191        case BFA_PCI_DEVICE_ID_CT_FC:
2192                ioc->asic_gen = BFI_ASIC_GEN_CT;
2193                ioc->fcmode = BFA_TRUE;
2194                ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2195                ioc->ad_cap_bm = BFA_CM_HBA;
2196                break;
2197
2198        case BFA_PCI_DEVICE_ID_CT2:
2199        case BFA_PCI_DEVICE_ID_CT2_QUAD:
2200                ioc->asic_gen = BFI_ASIC_GEN_CT2;
2201                if (clscode == BFI_PCIFN_CLASS_FC &&
2202                    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2203                        ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2204                        ioc->fcmode = BFA_TRUE;
2205                        ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2206                        ioc->ad_cap_bm = BFA_CM_HBA;
2207                } else {
2208                        ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2209                        ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2210                        if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2211                                ioc->port_mode =
2212                                ioc->port_mode_cfg = BFA_MODE_CNA;
2213                                ioc->ad_cap_bm = BFA_CM_CNA;
2214                        } else {
2215                                ioc->port_mode =
2216                                ioc->port_mode_cfg = BFA_MODE_NIC;
2217                                ioc->ad_cap_bm = BFA_CM_NIC;
2218                        }
2219                }
2220                break;
2221
2222        default:
2223                WARN_ON(1);
2224        }
2225
2226        /*
2227         * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2228         */
2229        if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2230                bfa_ioc_set_cb_hwif(ioc);
2231        else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2232                bfa_ioc_set_ct_hwif(ioc);
2233        else {
2234                WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2235                bfa_ioc_set_ct2_hwif(ioc);
2236                bfa_ioc_ct2_poweron(ioc);
2237        }
2238
2239        bfa_ioc_map_port(ioc);
2240        bfa_ioc_reg_init(ioc);
2241}
2242
2243/*
2244 * Initialize IOC dma memory
2245 *
2246 * @param[in]   dm_kva  kernel virtual address of IOC dma memory
2247 * @param[in]   dm_pa   physical address of IOC dma memory
2248 */
2249void
2250bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2251{
2252        /*
2253         * dma memory for firmware attribute
2254         */
2255        ioc->attr_dma.kva = dm_kva;
2256        ioc->attr_dma.pa = dm_pa;
2257        ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2258}
2259
2260void
2261bfa_ioc_enable(struct bfa_ioc_s *ioc)
2262{
2263        bfa_ioc_stats(ioc, ioc_enables);
2264        ioc->dbg_fwsave_once = BFA_TRUE;
2265
2266        bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2267}
2268
2269void
2270bfa_ioc_disable(struct bfa_ioc_s *ioc)
2271{
2272        bfa_ioc_stats(ioc, ioc_disables);
2273        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2274}
2275
2276void
2277bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2278{
2279        ioc->dbg_fwsave_once = BFA_TRUE;
2280        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2281}
2282
2283/*
2284 * Initialize memory for saving firmware trace. Driver must initialize
2285 * trace memory before calling bfa_ioc_enable().
2286 */
2287void
2288bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2289{
2290        ioc->dbg_fwsave     = dbg_fwsave;
2291        ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2292}
2293
2294/*
2295 * Register mailbox message handler functions
2296 *
2297 * @param[in]   ioc             IOC instance
2298 * @param[in]   mcfuncs         message class handler functions
2299 */
2300void
2301bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2302{
2303        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2304        int                             mc;
2305
2306        for (mc = 0; mc < BFI_MC_MAX; mc++)
2307                mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2308}
2309
2310/*
2311 * Register mailbox message handler function, to be called by common modules
2312 */
2313void
2314bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2315                    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2316{
2317        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2318
2319        mod->mbhdlr[mc].cbfn    = cbfn;
2320        mod->mbhdlr[mc].cbarg   = cbarg;
2321}
2322
2323/*
2324 * Queue a mailbox command request to firmware. If the mailbox is busy,
2325 * the command is queued and sent later by the mbox poll timer; it is the
2326 * caller's responsibility to serialize.
2327 * @param[in]   ioc     IOC instance
2328 * @param[in]   cmd     Mailbox command
2329 */
2330void
2331bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2332{
2333        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2334        u32                     stat;
2335
2336        /*
2337         * If a previous command is pending, queue new command
2338         */
2339        if (!list_empty(&mod->cmd_q)) {
2340                list_add_tail(&cmd->qe, &mod->cmd_q);
2341                return;
2342        }
2343
2344        /*
2345         * If mailbox is busy, queue command for poll timer
2346         */
2347        stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2348        if (stat) {
2349                list_add_tail(&cmd->qe, &mod->cmd_q);
2350                return;
2351        }
2352
2353        /*
2354         * mailbox is free -- queue command to firmware
2355         */
2356        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2357}
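
/*
 * Minimal usage sketch (editor's addition; bfa_ioc_send_fwsync() later
 * in this file is a real caller).  The bfa_mbox_cmd_s must stay valid
 * while it sits on cmd_q:
 *
 *	struct bfa_mbox_cmd_s cmd;	(caller-owned, e.g. embedded in a
 *					 module structure such as ablk->mb)
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */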
2358
2359/*
2360 * Handle mailbox interrupts
2361 */
2362void
2363bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2364{
2365        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2366        struct bfi_mbmsg_s              m;
2367        int                             mc;
2368
2369        if (bfa_ioc_msgget(ioc, &m)) {
2370                /*
2371                 * Treat IOC message class as special.
2372                 */
2373                mc = m.mh.msg_class;
2374                if (mc == BFI_MC_IOC) {
2375                        bfa_ioc_isr(ioc, &m);
2376                        return;
2377                }
2378
2379                if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2380                        return;
2381
2382                mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2383        }
2384
2385        bfa_ioc_lpu_read_stat(ioc);
2386
2387        /*
2388         * Try to send pending mailbox commands
2389         */
2390        bfa_ioc_mbox_poll(ioc);
2391}
2392
2393void
2394bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2395{
2396        bfa_ioc_stats(ioc, ioc_hbfails);
2397        ioc->stats.hb_count = ioc->hb_count;
2398        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2399}
2400
2401/*
2402 * return true if IOC is disabled
2403 */
2404bfa_boolean_t
2405bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2406{
2407        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2408                bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2409}
2410
2411/*
2412 * Return true if the running IOC firmware does not match the driver.
2413 */
2414bfa_boolean_t
2415bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2416{
2417        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2418                bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2419                bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2420}
2421
2422#define bfa_ioc_state_disabled(__sm)            \
2423        (((__sm) == BFI_IOC_UNINIT) ||          \
2424         ((__sm) == BFI_IOC_INITING) ||         \
2425         ((__sm) == BFI_IOC_HWINIT) ||          \
2426         ((__sm) == BFI_IOC_DISABLED) ||        \
2427         ((__sm) == BFI_IOC_FAIL) ||            \
2428         ((__sm) == BFI_IOC_CFG_DISABLED))
2429
2430/*
2431 * Check if adapter is disabled -- both IOCs should be in a disabled
2432 * state.
2433 */
2434bfa_boolean_t
2435bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2436{
2437        u32     ioc_state;
2438
2439        if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2440                return BFA_FALSE;
2441
2442        ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2443        if (!bfa_ioc_state_disabled(ioc_state))
2444                return BFA_FALSE;
2445
2446        if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2447                ioc_state = bfa_ioc_get_alt_ioc_fwstate(ioc);
2448                if (!bfa_ioc_state_disabled(ioc_state))
2449                        return BFA_FALSE;
2450        }
2451
2452        return BFA_TRUE;
2453}
2454
2455/*
2456 * Reset IOC fwstate registers.
2457 */
2458void
2459bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2460{
2461        bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2462        bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2463}
2464
2465#define BFA_MFG_NAME "Brocade"
2466void
2467bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2468                         struct bfa_adapter_attr_s *ad_attr)
2469{
2470        struct bfi_ioc_attr_s   *ioc_attr;
2471
2472        ioc_attr = ioc->attr;
2473
2474        bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2475        bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2476        bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2477        bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2478        memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2479                      sizeof(struct bfa_mfg_vpd_s));
2480
2481        ad_attr->nports = bfa_ioc_get_nports(ioc);
2482        ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2483
2484        bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2485        /* For now, model descr uses same model string */
2486        bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2487
2488        ad_attr->card_type = ioc_attr->card_type;
2489        ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2490
2491        if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2492                ad_attr->prototype = 1;
2493        else
2494                ad_attr->prototype = 0;
2495
2496        ad_attr->pwwn = ioc->attr->pwwn;
2497        ad_attr->mac  = bfa_ioc_get_mac(ioc);
2498
2499        ad_attr->pcie_gen = ioc_attr->pcie_gen;
2500        ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2501        ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2502        ad_attr->asic_rev = ioc_attr->asic_rev;
2503
2504        bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2505
2506        ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2507        ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2508                                  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2509        ad_attr->mfg_day = ioc_attr->mfg_day;
2510        ad_attr->mfg_month = ioc_attr->mfg_month;
2511        ad_attr->mfg_year = ioc_attr->mfg_year;
2512        memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2513}
2514
2515enum bfa_ioc_type_e
2516bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2517{
2518        if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2519                return BFA_IOC_TYPE_LL;
2520
2521        WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2522
2523        return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2524                ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2525}
2526
2527void
2528bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2529{
2530        memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2531        memcpy((void *)serial_num,
2532                        (void *)ioc->attr->brcd_serialnum,
2533                        BFA_ADAPTER_SERIAL_NUM_LEN);
2534}
2535
2536void
2537bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2538{
2539        memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2540        memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2541}
2542
2543void
2544bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2545{
2546        WARN_ON(!chip_rev);
2547
2548        memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2549
2550        chip_rev[0] = 'R';
2551        chip_rev[1] = 'e';
2552        chip_rev[2] = 'v';
2553        chip_rev[3] = '-';
2554        chip_rev[4] = ioc->attr->asic_rev;
2555        chip_rev[5] = '\0';
2556}
2557
2558void
2559bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2560{
2561        memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2562        memcpy(optrom_ver, ioc->attr->optrom_version,
2563                      BFA_VERSION_LEN);
2564}
2565
2566void
2567bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2568{
2569        memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2570        memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2571}
2572
2573void
2574bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2575{
2576        struct bfi_ioc_attr_s   *ioc_attr;
2577        u8 nports = bfa_ioc_get_nports(ioc);
2578
2579        WARN_ON(!model);
2580        memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2581
2582        ioc_attr = ioc->attr;
2583
2584        if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2585                (!bfa_mfg_is_mezz(ioc_attr->card_type)))
2586                snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2587                        BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2588        else
2589                snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2590                        BFA_MFG_NAME, ioc_attr->card_type);
2591}
2592
2593enum bfa_ioc_state
2594bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2595{
2596        enum bfa_iocpf_state iocpf_st;
2597        enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2598
2599        if (ioc_st == BFA_IOC_ENABLING ||
2600                ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2601
2602                iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2603
2604                switch (iocpf_st) {
2605                case BFA_IOCPF_SEMWAIT:
2606                        ioc_st = BFA_IOC_SEMWAIT;
2607                        break;
2608
2609                case BFA_IOCPF_HWINIT:
2610                        ioc_st = BFA_IOC_HWINIT;
2611                        break;
2612
2613                case BFA_IOCPF_FWMISMATCH:
2614                        ioc_st = BFA_IOC_FWMISMATCH;
2615                        break;
2616
2617                case BFA_IOCPF_FAIL:
2618                        ioc_st = BFA_IOC_FAIL;
2619                        break;
2620
2621                case BFA_IOCPF_INITFAIL:
2622                        ioc_st = BFA_IOC_INITFAIL;
2623                        break;
2624
2625                default:
2626                        break;
2627                }
2628        }
2629
2630        return ioc_st;
2631}
2632
2633void
2634bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2635{
2636        memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2637
2638        ioc_attr->state = bfa_ioc_get_state(ioc);
2639        ioc_attr->port_id = bfa_ioc_portid(ioc);
2640        ioc_attr->port_mode = ioc->port_mode;
2641        ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2642        ioc_attr->cap_bm = ioc->ad_cap_bm;
2643
2644        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2645
2646        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2647
2648        ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2649        ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2650        ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2651        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2652}
2653
2654mac_t
2655bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2656{
2657        /*
2658         * Check the IOC type and return the appropriate MAC
2659         */
2660        if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2661                return ioc->attr->fcoe_mac;
2662        else
2663                return ioc->attr->mac;
2664}
2665
2666mac_t
2667bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2668{
2669        mac_t   m;
2670
2671        m = ioc->attr->mfg_mac;
2672        if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2673                m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2674        else
2675                bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2676                        bfa_ioc_pcifn(ioc));
2677
2678        return m;
2679}
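
/*
 * Editor's example (hypothetical values): for an mfg_mac of
 * 00:05:1e:00:00:10 on PCI function 2, the old WWN/MAC scheme simply
 * adds the function number to the last byte, giving 00:05:1e:00:00:12,
 * while newer card types use bfa_mfg_increment_wwn_mac() so the
 * increment can carry across the low three bytes.
 */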
2680
2681/*
2682 * Send AEN notification
2683 */
2684void
2685bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2686{
2687        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2688        struct bfa_aen_entry_s  *aen_entry;
2689        enum bfa_ioc_type_e ioc_type;
2690
2691        bfad_get_aen_entry(bfad, aen_entry);
2692        if (!aen_entry)
2693                return;
2694
2695        ioc_type = bfa_ioc_get_type(ioc);
2696        switch (ioc_type) {
2697        case BFA_IOC_TYPE_FC:
2698                aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2699                break;
2700        case BFA_IOC_TYPE_FCoE:
2701                aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2702                aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2703                break;
2704        case BFA_IOC_TYPE_LL:
2705                aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2706                break;
2707        default:
2708                WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2709                break;
2710        }
2711
2712        /* Send the AEN notification */
2713        aen_entry->aen_data.ioc.ioc_type = ioc_type;
2714        bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2715                                  BFA_AEN_CAT_IOC, event);
2716}
2717
2718/*
2719 * Retrieve saved firmware trace from a prior IOC failure.
2720 */
2721bfa_status_t
2722bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2723{
2724        int     tlen;
2725
2726        if (ioc->dbg_fwsave_len == 0)
2727                return BFA_STATUS_ENOFSAVE;
2728
2729        tlen = *trclen;
2730        if (tlen > ioc->dbg_fwsave_len)
2731                tlen = ioc->dbg_fwsave_len;
2732
2733        memcpy(trcdata, ioc->dbg_fwsave, tlen);
2734        *trclen = tlen;
2735        return BFA_STATUS_OK;
2736}
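
/*
 * Editor's note: *trclen is in/out -- on entry the size of the caller's
 * buffer, on return the number of bytes copied (capped at
 * dbg_fwsave_len).  The trace is captured at most once per enable cycle
 * (dbg_fwsave_once) by bfa_ioc_debug_save_ftrc().
 */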
2737
2738
2739/*
2740 * Read the current firmware trace directly from smem.
2741 */
2742bfa_status_t
2743bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2744{
2745        u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2746        int tlen;
2747        bfa_status_t status;
2748
2749        bfa_trc(ioc, *trclen);
2750
2751        tlen = *trclen;
2752        if (tlen > BFA_DBG_FWTRC_LEN)
2753                tlen = BFA_DBG_FWTRC_LEN;
2754
2755        status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2756        *trclen = tlen;
2757        return status;
2758}
2759
2760static void
2761bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2762{
2763        struct bfa_mbox_cmd_s cmd;
2764        struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2765
2766        bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2767                    bfa_ioc_portid(ioc));
2768        req->clscode = cpu_to_be16(ioc->clscode);
2769        bfa_ioc_mbox_queue(ioc, &cmd);
2770}
2771
2772static void
2773bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2774{
2775        u32 fwsync_iter = 1000;
2776
2777        bfa_ioc_send_fwsync(ioc);
2778
2779        /*
2780         * After sending a fw sync mbox command wait for it to
2781         * take effect.  We will not wait for a response because
2782         *    1. fw_sync mbox cmd doesn't have a response.
2783         *    2. Even if we implement that,  interrupts might not
2784         *       be enabled when we call this function.
2785         * So, just keep checking if any mbox cmd is pending, and
2786         * after waiting for a reasonable amount of time, go ahead.
2787         * It is possible that fw has crashed and the mbox command
2788         * is never acknowledged.
2789         */
2790        while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2791                fwsync_iter--;
2792}
2793
2794/*
2795 * Dump firmware smem
2796 */
2797bfa_status_t
2798bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2799                                u32 *offset, int *buflen)
2800{
2801        u32 loff;
2802        int dlen;
2803        bfa_status_t status;
2804        u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
2805
2806        if (*offset >= smem_len) {
2807                *offset = *buflen = 0;
2808                return BFA_STATUS_EINVAL;
2809        }
2810
2811        loff = *offset;
2812        dlen = *buflen;
2813
2814        /*
2815         * First smem read, sync smem before proceeding
2816         * No need to sync before reading every chunk.
2817         */
2818        if (loff == 0)
2819                bfa_ioc_fwsync(ioc);
2820
2821        if ((loff + dlen) >= smem_len)
2822                dlen = smem_len - loff;
2823
2824        status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2825
2826        if (status != BFA_STATUS_OK) {
2827                *offset = *buflen = 0;
2828                return status;
2829        }
2830
2831        *offset += dlen;
2832
2833        if (*offset >= smem_len)
2834                *offset = 0;
2835
2836        *buflen = dlen;
2837
2838        return status;
2839}
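
/*
 * Illustrative dump loop (editor's addition; chunk_buf is a
 * hypothetical caller-provided buffer).  *offset is cursor state kept
 * by the caller and wraps to 0 once the whole smem has been read:
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = sizeof(chunk_buf);
 *		if (bfa_ioc_debug_fwcore(ioc, chunk_buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *	} while (off != 0);
 */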
2840
2841/*
2842 * Firmware statistics
2843 */
2844bfa_status_t
2845bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2846{
2847        u32 loff = BFI_IOC_FWSTATS_OFF +
2848                BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2849        int tlen;
2850        bfa_status_t status;
2851
2852        if (ioc->stats_busy) {
2853                bfa_trc(ioc, ioc->stats_busy);
2854                return BFA_STATUS_DEVBUSY;
2855        }
2856        ioc->stats_busy = BFA_TRUE;
2857
2858        tlen = sizeof(struct bfa_fw_stats_s);
2859        status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2860
2861        ioc->stats_busy = BFA_FALSE;
2862        return status;
2863}
2864
2865bfa_status_t
2866bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2867{
2868        u32 loff = BFI_IOC_FWSTATS_OFF +
2869                BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2870        int tlen;
2871        bfa_status_t status;
2872
2873        if (ioc->stats_busy) {
2874                bfa_trc(ioc, ioc->stats_busy);
2875                return BFA_STATUS_DEVBUSY;
2876        }
2877        ioc->stats_busy = BFA_TRUE;
2878
2879        tlen = sizeof(struct bfa_fw_stats_s);
2880        status = bfa_ioc_smem_clr(ioc, loff, tlen);
2881
2882        ioc->stats_busy = BFA_FALSE;
2883        return status;
2884}
2885
2886/*
2887 * Save firmware trace if configured.
2888 */
2889void
2890bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
2891{
2892        int             tlen;
2893
2894        if (ioc->dbg_fwsave_once) {
2895                ioc->dbg_fwsave_once = BFA_FALSE;
2896                if (ioc->dbg_fwsave_len) {
2897                        tlen = ioc->dbg_fwsave_len;
2898                        bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2899                }
2900        }
2901}
2902
2903/*
2904 * Firmware failure detected. Start recovery actions.
2905 */
2906static void
2907bfa_ioc_recover(struct bfa_ioc_s *ioc)
2908{
2909        bfa_ioc_stats(ioc, ioc_hbfails);
2910        ioc->stats.hb_count = ioc->hb_count;
2911        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2912}
2913
2914/*
2915 *  BFA IOC PF private functions
2916 */
2917static void
2918bfa_iocpf_timeout(void *ioc_arg)
2919{
2920        struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2921
2922        bfa_trc(ioc, 0);
2923        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2924}
2925
2926static void
2927bfa_iocpf_sem_timeout(void *ioc_arg)
2928{
2929        struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
2930
2931        bfa_ioc_hw_sem_get(ioc);
2932}
2933
2934static void
2935bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2936{
2937        u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
2938
2939        bfa_trc(ioc, fwstate);
2940
2941        if (fwstate == BFI_IOC_DISABLED) {
2942                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2943                return;
2944        }
2945
2946        if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV)) {
2947                bfa_iocpf_timeout(ioc);
2948        } else {
2949                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2950                bfa_iocpf_poll_timer_start(ioc);
2951        }
2952}
2953
2954static void
2955bfa_iocpf_poll_timeout(void *ioc_arg)
2956{
2957        struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2958
2959        bfa_ioc_poll_fwinit(ioc);
2960}
2961
2962/*
2963 *  bfa timer function
2964 */
2965void
2966bfa_timer_beat(struct bfa_timer_mod_s *mod)
2967{
2968        struct list_head *qh = &mod->timer_q;
2969        struct list_head *qe, *qe_next;
2970        struct bfa_timer_s *elem;
2971        struct list_head timedout_q;
2972
2973        INIT_LIST_HEAD(&timedout_q);
2974
2975        qe = bfa_q_next(qh);
2976
2977        while (qe != qh) {
2978                qe_next = bfa_q_next(qe);
2979
2980                elem = (struct bfa_timer_s *) qe;
2981                if (elem->timeout <= BFA_TIMER_FREQ) {
2982                        elem->timeout = 0;
2983                        list_del(&elem->qe);
2984                        list_add_tail(&elem->qe, &timedout_q);
2985                } else {
2986                        elem->timeout -= BFA_TIMER_FREQ;
2987                }
2988
2989                qe = qe_next;   /* go to next elem */
2990        }
2991
2992        /*
2993         * Pop all the timeout entries
2994         */
2995        while (!list_empty(&timedout_q)) {
2996                bfa_q_deq(&timedout_q, &elem);
2997                elem->timercb(elem->arg);
2998        }
2999}
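
/*
 * Editor's note: bfa_timer_beat() is the module's only clock source.
 * The decrement logic assumes the driver calls it once every
 * BFA_TIMER_FREQ msecs (from the bfad periodic timer, under the same
 * lock as bfa_timer_begin()/bfa_timer_stop()), so all timeouts are
 * quantized to BFA_TIMER_FREQ granularity.  Callbacks run from the
 * local timedout_q, so a callback may safely re-arm its own timer.
 */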
3000
3001/*
3002 * Should be called with lock protection
3003 */
3004void
3005bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3006                    void (*timercb) (void *), void *arg, unsigned int timeout)
3007{
3008
3009        WARN_ON(timercb == NULL);
3010        WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3011
3012        timer->timeout = timeout;
3013        timer->timercb = timercb;
3014        timer->arg = arg;
3015
3016        list_add_tail(&timer->qe, &mod->timer_q);
3017}
3018
3019/*
3020 * Should be called with lock protection
3021 */
3022void
3023bfa_timer_stop(struct bfa_timer_s *timer)
3024{
3025        WARN_ON(list_empty(&timer->qe));
3026
3027        list_del(&timer->qe);
3028}
3029
3030/*
3031 *      ASIC block related
3032 */
3033static void
3034bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3035{
3036        struct bfa_ablk_cfg_inst_s *cfg_inst;
3037        int i, j;
3038        u16     be16;
3039
3040        for (i = 0; i < BFA_ABLK_MAX; i++) {
3041                cfg_inst = &cfg->inst[i];
3042                for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3043                        be16 = cfg_inst->pf_cfg[j].pers;
3044                        cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3045                        be16 = cfg_inst->pf_cfg[j].num_qpairs;
3046                        cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3047                        be16 = cfg_inst->pf_cfg[j].num_vectors;
3048                        cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3049                        be16 = cfg_inst->pf_cfg[j].bw_min;
3050                        cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3051                        be16 = cfg_inst->pf_cfg[j].bw_max;
3052                        cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3053                }
3054        }
3055}
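
/*
 * Editor's note: firmware returns the ASIC-block configuration in
 * big-endian; the swap above converts every 16-bit field in place,
 * immediately after the DMA buffer is copied out in the
 * BFI_ABLK_I2H_QUERY branch of bfa_ablk_isr() below.
 */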
3056
3057static void
3058bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3059{
3060        struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3061        struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3062        bfa_ablk_cbfn_t cbfn;
3063
3064        WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3065        bfa_trc(ablk->ioc, msg->mh.msg_id);
3066
3067        switch (msg->mh.msg_id) {
3068        case BFI_ABLK_I2H_QUERY:
3069                if (rsp->status == BFA_STATUS_OK) {
3070                        memcpy(ablk->cfg, ablk->dma_addr.kva,
3071                                sizeof(struct bfa_ablk_cfg_s));
3072                        bfa_ablk_config_swap(ablk->cfg);
3073                        ablk->cfg = NULL;
3074                }
3075                break;
3076
3077        case BFI_ABLK_I2H_ADPT_CONFIG:
3078        case BFI_ABLK_I2H_PORT_CONFIG:
3079                /* update config port mode */
3080                ablk->ioc->port_mode_cfg = rsp->port_mode;
3081                /* fall through */
3082        case BFI_ABLK_I2H_PF_DELETE:
3083        case BFI_ABLK_I2H_PF_UPDATE:
3084        case BFI_ABLK_I2H_OPTROM_ENABLE:
3085        case BFI_ABLK_I2H_OPTROM_DISABLE:
3086                /* No-op */
3087                break;
3088
3089        case BFI_ABLK_I2H_PF_CREATE:
3090                *(ablk->pcifn) = rsp->pcifn;
3091                ablk->pcifn = NULL;
3092                break;
3093
3094        default:
3095                WARN_ON(1);
3096        }
3097
3098        ablk->busy = BFA_FALSE;
3099        if (ablk->cbfn) {
3100                cbfn = ablk->cbfn;
3101                ablk->cbfn = NULL;
3102                cbfn(ablk->cbarg, rsp->status);
3103        }
3104}
3105
3106static void
3107bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3108{
3109        struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3110
3111        bfa_trc(ablk->ioc, event);
3112
3113        switch (event) {
3114        case BFA_IOC_E_ENABLED:
3115                WARN_ON(ablk->busy != BFA_FALSE);
3116                break;
3117
3118        case BFA_IOC_E_DISABLED:
3119        case BFA_IOC_E_FAILED:
3120                /* Fail any pending requests */
3121                ablk->pcifn = NULL;
3122                if (ablk->busy) {
3123                        if (ablk->cbfn)
3124                                ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3125                        ablk->cbfn = NULL;
3126                        ablk->busy = BFA_FALSE;
3127                }
3128                break;
3129
3130        default:
3131                WARN_ON(1);
3132                break;
3133        }
3134}
3135
3136u32
3137bfa_ablk_meminfo(void)
3138{
3139        return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3140}
3141
3142void
3143bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3144{
3145        ablk->dma_addr.kva = dma_kva;
3146        ablk->dma_addr.pa  = dma_pa;
3147}
3148
3149void
3150bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3151{
3152        ablk->ioc = ioc;
3153
3154        bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3155        bfa_q_qe_init(&ablk->ioc_notify);
3156        bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3157        list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3158}
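
/*
 * Setup-order sketch (editor's addition; dma_alloc stands in for
 * whatever DMA allocator the port driver uses):
 *
 *	dma_kva = dma_alloc(bfa_ablk_meminfo(), &dma_pa);
 *	bfa_ablk_memclaim(ablk, dma_kva, dma_pa);
 *	bfa_ablk_attach(ablk, ioc);
 *
 * After this, query/config requests may be issued once the IOC reports
 * operational; completions arrive through bfa_ablk_isr() and IOC state
 * changes through bfa_ablk_notify().
 */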
3159
3160bfa_status_t
3161bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3162                bfa_ablk_cbfn_t cbfn, void *cbarg)
3163{
3164        struct bfi_ablk_h2i_query_s *m;
3165
3166        WARN_ON(!ablk_cfg);
3167
3168        if (!bfa_ioc_is_operational(ablk->ioc)) {
3169                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3170                return BFA_STATUS_IOC_FAILURE;
3171        }
3172
3173        if (ablk->busy) {
3174                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3175                return  BFA_STATUS_DEVBUSY;
3176        }
3177
3178        ablk->cfg = ablk_cfg;
3179        ablk->cbfn  = cbfn;
3180        ablk->cbarg = cbarg;
3181        ablk->busy  = BFA_TRUE;
3182
3183        m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3184        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3185                    bfa_ioc_portid(ablk->ioc));
3186        bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3187        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3188
3189        return BFA_STATUS_OK;
3190}
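
/*
 * Illustrative call (editor's addition; my_cfg, my_done and my_arg are
 * hypothetical).  The query completes asynchronously: firmware DMAs the
 * config, bfa_ablk_isr() byte-swaps it in place and then invokes the
 * callback with the final status:
 *
 *	if (bfa_ablk_query(ablk, &my_cfg, my_done, my_arg) !=
 *	    BFA_STATUS_OK)
 *		return;		(IOC down, or a request already in flight)
 */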
3191
3192bfa_status_t
3193bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3194                u8 port, enum bfi_pcifn_class personality,
3195                u16 bw_min, u16 bw_max,
3196                bfa_ablk_cbfn_t cbfn, void *cbarg)
3197{
3198        struct bfi_ablk_h2i_pf_req_s *m;
3199
3200        if (!bfa_ioc_is_operational(ablk->ioc)) {
3201                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3202                return BFA_STATUS_IOC_FAILURE;
3203        }
3204
3205        if (ablk->busy) {
3206                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3207                return  BFA_STATUS_DEVBUSY;
3208        }
3209
3210        ablk->pcifn = pcifn;
3211        ablk->cbfn = cbfn;
3212        ablk->cbarg = cbarg;
3213        ablk->busy  = BFA_TRUE;
3214
3215        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3216        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3217                    bfa_ioc_portid(ablk->ioc));
3218        m->pers = cpu_to_be16((u16)personality);
3219        m->bw_min = cpu_to_be16(bw_min);
3220        m->bw_max = cpu_to_be16(bw_max);
3221        m->port = port;
3222        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3223
3224        return BFA_STATUS_OK;
3225}
3226
3227bfa_status_t
3228bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3229                bfa_ablk_cbfn_t cbfn, void *cbarg)
3230{
3231        struct bfi_ablk_h2i_pf_req_s *m;
3232
3233        if (!bfa_ioc_is_operational(ablk->ioc)) {
3234                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3235                return BFA_STATUS_IOC_FAILURE;
3236        }
3237
3238        if (ablk->busy) {
3239                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3240                return  BFA_STATUS_DEVBUSY;
3241        }
3242
3243        ablk->cbfn  = cbfn;
3244        ablk->cbarg = cbarg;
3245        ablk->busy  = BFA_TRUE;
3246
3247        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3248        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3249                    bfa_ioc_portid(ablk->ioc));
3250        m->pcifn = (u8)pcifn;
3251        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3252
3253        return BFA_STATUS_OK;
3254}
3255
3256bfa_status_t
3257bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3258                int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3259{
3260        struct bfi_ablk_h2i_cfg_req_s *m;
3261
3262        if (!bfa_ioc_is_operational(ablk->ioc)) {
3263                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3264                return BFA_STATUS_IOC_FAILURE;
3265        }
3266
3267        if (ablk->busy) {
3268                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3269                return  BFA_STATUS_DEVBUSY;
3270        }
3271
3272        ablk->cbfn  = cbfn;
3273        ablk->cbarg = cbarg;
3274        ablk->busy  = BFA_TRUE;
3275
3276        m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3277        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3278                    bfa_ioc_portid(ablk->ioc));
3279        m->mode = (u8)mode;
3280        m->max_pf = (u8)max_pf;
3281        m->max_vf = (u8)max_vf;
3282        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3283
3284        return BFA_STATUS_OK;
3285}
3286
3287bfa_status_t
3288bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3289                int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3290{
3291        struct bfi_ablk_h2i_cfg_req_s *m;
3292
3293        if (!bfa_ioc_is_operational(ablk->ioc)) {
3294                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3295                return BFA_STATUS_IOC_FAILURE;
3296        }
3297
3298        if (ablk->busy) {
3299                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3300                return  BFA_STATUS_DEVBUSY;
3301        }
3302
3303        ablk->cbfn  = cbfn;
3304        ablk->cbarg = cbarg;
3305        ablk->busy  = BFA_TRUE;
3306
3307        m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3308        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3309                bfa_ioc_portid(ablk->ioc));
3310        m->port = (u8)port;
3311        m->mode = (u8)mode;
3312        m->max_pf = (u8)max_pf;
3313        m->max_vf = (u8)max_vf;
3314        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3315
3316        return BFA_STATUS_OK;
3317}
3318
3319bfa_status_t
3320bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3321                   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3322{
3323        struct bfi_ablk_h2i_pf_req_s *m;
3324
3325        if (!bfa_ioc_is_operational(ablk->ioc)) {
3326                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3327                return BFA_STATUS_IOC_FAILURE;
3328        }
3329
3330        if (ablk->busy) {
3331                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3332                return  BFA_STATUS_DEVBUSY;
3333        }
3334
3335        ablk->cbfn  = cbfn;
3336        ablk->cbarg = cbarg;
3337        ablk->busy  = BFA_TRUE;
3338
3339        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3340        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3341                bfa_ioc_portid(ablk->ioc));
3342        m->pcifn = (u8)pcifn;
3343        m->bw_min = cpu_to_be16(bw_min);
3344        m->bw_max = cpu_to_be16(bw_max);
3345        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3346
3347        return BFA_STATUS_OK;
3348}
3349
3350bfa_status_t
3351bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3352{
3353        struct bfi_ablk_h2i_optrom_s *m;
3354
3355        if (!bfa_ioc_is_operational(ablk->ioc)) {
3356                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3357                return BFA_STATUS_IOC_FAILURE;
3358        }
3359
3360        if (ablk->busy) {
3361                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3362                return  BFA_STATUS_DEVBUSY;
3363        }
3364
3365        ablk->cbfn  = cbfn;
3366        ablk->cbarg = cbarg;
3367        ablk->busy  = BFA_TRUE;
3368
3369        m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3370        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3371                bfa_ioc_portid(ablk->ioc));
3372        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3373
3374        return BFA_STATUS_OK;
3375}
3376
3377bfa_status_t
3378bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3379{
3380        struct bfi_ablk_h2i_optrom_s *m;
3381
3382        if (!bfa_ioc_is_operational(ablk->ioc)) {
3383                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3384                return BFA_STATUS_IOC_FAILURE;
3385        }
3386
3387        if (ablk->busy) {
3388                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3389                return  BFA_STATUS_DEVBUSY;
3390        }
3391
3392        ablk->cbfn  = cbfn;
3393        ablk->cbarg = cbarg;
3394        ablk->busy  = BFA_TRUE;
3395
3396        m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3397        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3398                bfa_ioc_portid(ablk->ioc));
3399        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3400
3401        return BFA_STATUS_OK;
3402}
3403
3404/*
3405 *      SFP module specific
3406 */
3407
3408/* forward declarations */
3409static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3410static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3411static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3412                                enum bfa_port_speed portspeed);
3413
3414static void
3415bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3416{
3417        bfa_trc(sfp, sfp->lock);
3418        if (sfp->cbfn)
3419                sfp->cbfn(sfp->cbarg, sfp->status);
3420        sfp->lock = 0;
3421        sfp->cbfn = NULL;
3422}
3423
3424static void
3425bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3426{
3427        bfa_trc(sfp, sfp->portspeed);
3428        if (sfp->media) {
3429                bfa_sfp_media_get(sfp);
3430                if (sfp->state_query_cbfn)
3431                        sfp->state_query_cbfn(sfp->state_query_cbarg,
3432                                        sfp->status);
3433                sfp->media = NULL;
3434        }
3435
3436        if (sfp->portspeed) {
3437                sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3438                if (sfp->state_query_cbfn)
3439                        sfp->state_query_cbfn(sfp->state_query_cbarg,
3440                                        sfp->status);
3441                sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3442        }
3443
3444        sfp->state_query_lock = 0;
3445        sfp->state_query_cbfn = NULL;
3446}
3447
3448/*
3449 *      IOC event handler.
3450 */
3451static void
3452bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3453{
3454        struct bfa_sfp_s *sfp = sfp_arg;
3455
3456        bfa_trc(sfp, event);
3457        bfa_trc(sfp, sfp->lock);
3458        bfa_trc(sfp, sfp->state_query_lock);
3459
3460        switch (event) {
3461        case BFA_IOC_E_DISABLED:
3462        case BFA_IOC_E_FAILED:
3463                if (sfp->lock) {
3464                        sfp->status = BFA_STATUS_IOC_FAILURE;
3465                        bfa_cb_sfp_show(sfp);
3466                }
3467
3468                if (sfp->state_query_lock) {
3469                        sfp->status = BFA_STATUS_IOC_FAILURE;
3470                        bfa_cb_sfp_state_query(sfp);
3471                }
3472                break;
3473
3474        default:
3475                break;
3476        }
3477}
3478
3479/*
3480 * SFP's State Change Notification post to AEN
3481 */
3482static void
3483bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3484{
3485        struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3486        struct bfa_aen_entry_s  *aen_entry;
3487        enum bfa_port_aen_event aen_evt = 0;
3488
3489        bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3490                      ((u64)rsp->event));
3491
3492        bfad_get_aen_entry(bfad, aen_entry);
3493        if (!aen_entry)
3494                return;
3495
3496        aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3497        aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3498        aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3499
3500        switch (rsp->event) {
3501        case BFA_SFP_SCN_INSERTED:
3502                aen_evt = BFA_PORT_AEN_SFP_INSERT;
3503                break;
3504        case BFA_SFP_SCN_REMOVED:
3505                aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3506                break;
3507        case BFA_SFP_SCN_FAILED:
3508                aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3509                break;
3510        case BFA_SFP_SCN_UNSUPPORT:
3511                aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3512                break;
3513        case BFA_SFP_SCN_POM:
3514                aen_evt = BFA_PORT_AEN_SFP_POM;
3515                aen_entry->aen_data.port.level = rsp->pomlvl;
3516                break;
3517        default:
3518                bfa_trc(sfp, rsp->event);
3519                WARN_ON(1);
3520        }
3521
3522        /* Send the AEN notification */
3523        bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3524                                  BFA_AEN_CAT_PORT, aen_evt);
3525}
3526
3527/*
3528 *      SFP get data send
3529 */
3530static void
3531bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3532{
3533        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3534
3535        bfa_trc(sfp, req->memtype);
3536
3537        /* build host command */
3538        bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3539                        bfa_ioc_portid(sfp->ioc));
3540
3541        /* send mbox cmd */
3542        bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3543}
3544
3545/*
3546 *      SFP is valid, read sfp data
3547 */
3548static void
3549bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3550{
3551        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3552
3553        WARN_ON(sfp->lock != 0);
3554        bfa_trc(sfp, sfp->state);
3555
3556        sfp->lock = 1;
3557        sfp->memtype = memtype;
3558        req->memtype = memtype;
3559
3560        /* Setup SG list */
3561        bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3562
3563        bfa_sfp_getdata_send(sfp);
3564}
3565
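/*
 * bfa_sfp_getdata() pairs a small mailbox command with a DMA buffer: the
 * request carries only an (address, length) descriptor set up through
 * bfa_alen_set(), while the EEPROM image itself is DMAed by firmware into
 * the dbuf_pa area and later copied out by bfa_sfp_show_comp(). The
 * sfp->lock flag keeps a single transfer outstanding. In sketch form
 * (names as in this file):
 *
 *	sfp->lock = 1;				// one outstanding transfer
 *	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
 *	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);	// reply: I2H_SHOW
 */
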
3566/*
3567 *      SFP scn handler
3568 */
3569static void
3570bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3571{
3572        struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3573
3574        switch (rsp->event) {
3575        case BFA_SFP_SCN_INSERTED:
3576                sfp->state = BFA_SFP_STATE_INSERTED;
3577                sfp->data_valid = 0;
3578                bfa_sfp_scn_aen_post(sfp, rsp);
3579                break;
3580        case BFA_SFP_SCN_REMOVED:
3581                sfp->state = BFA_SFP_STATE_REMOVED;
3582                sfp->data_valid = 0;
3583                bfa_sfp_scn_aen_post(sfp, rsp);
3584                break;
3585        case BFA_SFP_SCN_FAILED:
3586                sfp->state = BFA_SFP_STATE_FAILED;
3587                sfp->data_valid = 0;
3588                bfa_sfp_scn_aen_post(sfp, rsp);
3589                break;
3590        case BFA_SFP_SCN_UNSUPPORT:
3591                sfp->state = BFA_SFP_STATE_UNSUPPORT;
3592                bfa_sfp_scn_aen_post(sfp, rsp);
3593                if (!sfp->lock)
3594                        bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3595                break;
3596        case BFA_SFP_SCN_POM:
3597                bfa_sfp_scn_aen_post(sfp, rsp);
3598                break;
3599        case BFA_SFP_SCN_VALID:
3600                sfp->state = BFA_SFP_STATE_VALID;
3601                if (!sfp->lock)
3602                        bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3603                break;
3604        default:
3605                bfa_trc(sfp, rsp->event);
3606                WARN_ON(1);
3607        }
3608}
3609
3610/*
3611 * SFP show complete
3612 */
3613static void
3614bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3615{
3616        struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3617
3618        if (!sfp->lock) {
3619                /*
3620                 * receiving response after ioc failure
3621                 */
3622                bfa_trc(sfp, sfp->lock);
3623                return;
3624        }
3625
3626        bfa_trc(sfp, rsp->status);
3627        if (rsp->status == BFA_STATUS_OK) {
3628                sfp->data_valid = 1;
3629                if (sfp->state == BFA_SFP_STATE_VALID)
3630                        sfp->status = BFA_STATUS_OK;
3631                else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3632                        sfp->status = BFA_STATUS_SFP_UNSUPP;
3633                else
3634                        bfa_trc(sfp, sfp->state);
3635        } else {
3636                sfp->data_valid = 0;
3637                sfp->status = rsp->status;
3638                /* sfpshow shouldn't change sfp state */
3639        }
3640
3641        bfa_trc(sfp, sfp->memtype);
3642        if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3643                bfa_trc(sfp, sfp->data_valid);
3644                if (sfp->data_valid) {
3645                        u32     size = sizeof(struct sfp_mem_s);
3646                        u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3647                        memcpy(des, sfp->dbuf_kva, size);
3648                }
3649                /*
3650                 * Queue completion callback.
3651                 */
3652                bfa_cb_sfp_show(sfp);
3653        } else
3654                sfp->lock = 0;
3655
3656        bfa_trc(sfp, sfp->state_query_lock);
3657        if (sfp->state_query_lock) {
3658                sfp->state = rsp->state;
3659                /* Complete callback */
3660                bfa_cb_sfp_state_query(sfp);
3661        }
3662}
3663
3664/*
3665 *      SFP query fw sfp state
3666 */
3667static void
3668bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3669{
3670        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3671
3672        /* Should not be doing query if not in _INIT state */
3673        WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3674        WARN_ON(sfp->state_query_lock != 0);
3675        bfa_trc(sfp, sfp->state);
3676
3677        sfp->state_query_lock = 1;
3678        req->memtype = 0;
3679
3680        if (!sfp->lock)
3681                bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3682}
3683
3684static void
3685bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3686{
3687        enum bfa_defs_sfp_media_e *media = sfp->media;
3688
3689        *media = BFA_SFP_MEDIA_UNKNOWN;
3690
3691        if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3692                *media = BFA_SFP_MEDIA_UNSUPPORT;
3693        else if (sfp->state == BFA_SFP_STATE_VALID) {
3694                union sfp_xcvr_e10g_code_u e10g;
3695                struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3696                u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3697                                (sfpmem->srlid_base.xcvr[5] >> 1);
3698
3699                e10g.b = sfpmem->srlid_base.xcvr[0];
3700                bfa_trc(sfp, e10g.b);
3701                bfa_trc(sfp, xmtr_tech);
3702                /* check fc transmitter tech */
3703                if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3704                    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3705                    (xmtr_tech & SFP_XMTR_TECH_CA))
3706                        *media = BFA_SFP_MEDIA_CU;
3707                else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3708                         (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3709                        *media = BFA_SFP_MEDIA_EL;
3710                else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3711                         (xmtr_tech & SFP_XMTR_TECH_LC))
3712                        *media = BFA_SFP_MEDIA_LW;
3713                else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3714                         (xmtr_tech & SFP_XMTR_TECH_SN) ||
3715                         (xmtr_tech & SFP_XMTR_TECH_SA))
3716                        *media = BFA_SFP_MEDIA_SW;
3717                /* Check 10G Ethernet compliance code */
3718                else if (e10g.r.e10g_sr)
3719                        *media = BFA_SFP_MEDIA_SW;
3720                else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3721                        *media = BFA_SFP_MEDIA_LW;
3722                else if (e10g.r.e10g_unall)
3723                        *media = BFA_SFP_MEDIA_UNKNOWN;
3724                else
3725                        bfa_trc(sfp, 0);
3726        } else
3727                bfa_trc(sfp, sfp->state);
3728}
3729
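/*
 * The xmtr_tech value above is stitched together from the serial-ID
 * transceiver code bytes: the low two bits of xcvr[4] become the top of a
 * nine-bit technology field and bits 7..1 of xcvr[5] supply the rest. A
 * worked example (byte values hypothetical): xcvr[4] = 0x01 and
 * xcvr[5] = 0x40 give
 *
 *	xmtr_tech = (0x01 & 0x3) << 7 | (0x40 >> 1)
 *	          = 0x080 | 0x020 = 0x0a0
 *
 * which is then tested bitwise against the SFP_XMTR_TECH_* masks.
 */
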
3730static bfa_status_t
3731bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3732{
3733        struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3734        struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3735        union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3736        union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3737
3738        if (portspeed == BFA_PORT_SPEED_10GBPS) {
3739                if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3740                        return BFA_STATUS_OK;
3741                else {
3742                        bfa_trc(sfp, e10g.b);
3743                        return BFA_STATUS_UNSUPP_SPEED;
3744                }
3745        }
3746        if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3747            ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3748            ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3749            ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3750            ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3751                return BFA_STATUS_OK;
3752        else {
3753                bfa_trc(sfp, portspeed);
3754                bfa_trc(sfp, fc3.b);
3755                bfa_trc(sfp, e10g.b);
3756                return BFA_STATUS_UNSUPP_SPEED;
3757        }
3758}
3759
3760/*
3761 *      SFP hmbox handler
3762 */
3763void
3764bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3765{
3766        struct bfa_sfp_s *sfp = sfparg;
3767
3768        switch (msg->mh.msg_id) {
3769        case BFI_SFP_I2H_SHOW:
3770                bfa_sfp_show_comp(sfp, msg);
3771                break;
3772
3773        case BFI_SFP_I2H_SCN:
3774                bfa_sfp_scn(sfp, msg);
3775                break;
3776
3777        default:
3778                bfa_trc(sfp, msg->mh.msg_id);
3779                WARN_ON(1);
3780        }
3781}
3782
3783/*
3784 *      Return DMA memory needed by sfp module.
3785 */
3786u32
3787bfa_sfp_meminfo(void)
3788{
3789        return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3790}
3791
3792/*
3793 *      Attach virtual and physical memory for SFP.
3794 */
3795void
3796bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3797                struct bfa_trc_mod_s *trcmod)
3798{
3799        sfp->dev = dev;
3800        sfp->ioc = ioc;
3801        sfp->trcmod = trcmod;
3802
3803        sfp->cbfn = NULL;
3804        sfp->cbarg = NULL;
3805        sfp->sfpmem = NULL;
3806        sfp->lock = 0;
3807        sfp->data_valid = 0;
3808        sfp->state = BFA_SFP_STATE_INIT;
3809        sfp->state_query_lock = 0;
3810        sfp->state_query_cbfn = NULL;
3811        sfp->state_query_cbarg = NULL;
3812        sfp->media = NULL;
3813        sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3814        sfp->is_elb = BFA_FALSE;
3815
3816        bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3817        bfa_q_qe_init(&sfp->ioc_notify);
3818        bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3819        list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3820}
3821
3822/*
3823 *      Claim Memory for SFP
3824 */
3825void
3826bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3827{
3828        sfp->dbuf_kva   = dm_kva;
3829        sfp->dbuf_pa    = dm_pa;
3830        memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3831
3832        dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3833        dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3834}
3835
3836/*
3837 * Show SFP eeprom content
3838 *
3839 * @param[in] sfp   - bfa sfp module
3840 *
3841 * @param[out] sfpmem - sfp eeprom data
3842 *
3843 */
3844bfa_status_t
3845bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3846                bfa_cb_sfp_t cbfn, void *cbarg)
3847{
3848
3849        if (!bfa_ioc_is_operational(sfp->ioc)) {
3850                bfa_trc(sfp, 0);
3851                return BFA_STATUS_IOC_NON_OP;
3852        }
3853
3854        if (sfp->lock) {
3855                bfa_trc(sfp, 0);
3856                return BFA_STATUS_DEVBUSY;
3857        }
3858
3859        sfp->cbfn = cbfn;
3860        sfp->cbarg = cbarg;
3861        sfp->sfpmem = sfpmem;
3862
3863        bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3864        return BFA_STATUS_OK;
3865}
3866
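/*
 * A minimal bfa_sfp_show() usage sketch. The handler and buffer below are
 * hypothetical (assuming the usual (void *cbarg, bfa_status_t status)
 * bfa_cb_sfp_t signature); the key point is that BFA_STATUS_OK only means
 * "queued", and sfpmem holds valid data once the callback has run:
 *
 *	static struct sfp_mem_s drv_sfpmem;
 *
 *	static void drv_sfp_show_done(void *cbarg, bfa_status_t status)
 *	{
 *		if (status == BFA_STATUS_OK)
 *			;	// drv_sfpmem now holds the EEPROM image
 *	}
 *
 *	rc = bfa_sfp_show(sfp, &drv_sfpmem, drv_sfp_show_done, NULL);
 *	// rc: IOC_NON_OP or DEVBUSY now, or OK with a callback later
 */
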
3867/*
3868 * Return SFP Media type
3869 *
3870 * @param[in] sfp   - bfa sfp module
3871 *
3872 * @param[out] media - sfp media type returned to the caller
3873 *
3874 */
3875bfa_status_t
3876bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3877                bfa_cb_sfp_t cbfn, void *cbarg)
3878{
3879        if (!bfa_ioc_is_operational(sfp->ioc)) {
3880                bfa_trc(sfp, 0);
3881                return BFA_STATUS_IOC_NON_OP;
3882        }
3883
3884        sfp->media = media;
3885        if (sfp->state == BFA_SFP_STATE_INIT) {
3886                if (sfp->state_query_lock) {
3887                        bfa_trc(sfp, 0);
3888                        return BFA_STATUS_DEVBUSY;
3889                } else {
3890                        sfp->state_query_cbfn = cbfn;
3891                        sfp->state_query_cbarg = cbarg;
3892                        bfa_sfp_state_query(sfp);
3893                        return BFA_STATUS_SFP_NOT_READY;
3894                }
3895        }
3896
3897        bfa_sfp_media_get(sfp);
3898        return BFA_STATUS_OK;
3899}
3900
3901/*
3902 * Check if the user-set port speed is allowed by the SFP
3903 *
3904 * @param[in] sfp   - bfa sfp module
3905 * @param[in] portspeed - port speed from user
3906 *
3907 */
3908bfa_status_t
3909bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3910                bfa_cb_sfp_t cbfn, void *cbarg)
3911{
3912        WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3913
3914        if (!bfa_ioc_is_operational(sfp->ioc))
3915                return BFA_STATUS_IOC_NON_OP;
3916
3917        /* For Mezz card, all speeds are allowed */
3918        if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3919                return BFA_STATUS_OK;
3920
3921        /* Check SFP state */
3922        sfp->portspeed = portspeed;
3923        if (sfp->state == BFA_SFP_STATE_INIT) {
3924                if (sfp->state_query_lock) {
3925                        bfa_trc(sfp, 0);
3926                        return BFA_STATUS_DEVBUSY;
3927                } else {
3928                        sfp->state_query_cbfn = cbfn;
3929                        sfp->state_query_cbarg = cbarg;
3930                        bfa_sfp_state_query(sfp);
3931                        return BFA_STATUS_SFP_NOT_READY;
3932                }
3933        }
3934
3935        if (sfp->state == BFA_SFP_STATE_REMOVED ||
3936            sfp->state == BFA_SFP_STATE_FAILED) {
3937                bfa_trc(sfp, sfp->state);
3938                return BFA_STATUS_NO_SFP_DEV;
3939        }
3940
3941        if (sfp->state == BFA_SFP_STATE_INSERTED) {
3942                bfa_trc(sfp, sfp->state);
3943                return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
3944        }
3945
3946        /* For eloopback, all speeds are allowed */
3947        if (sfp->is_elb)
3948                return BFA_STATUS_OK;
3949
3950        return bfa_sfp_speed_valid(sfp, portspeed);
3951}
3952
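/*
 * bfa_sfp_speed() (and bfa_sfp_media() above) can answer in two ways:
 * synchronously, or with BFA_STATUS_SFP_NOT_READY when the module state is
 * still _INIT, in which case the real verdict arrives through the
 * state-query callback. A hedged sketch of handling both paths, where
 * drv_speed_done is a hypothetical bfa_cb_sfp_t completion handler:
 *
 *	rc = bfa_sfp_speed(sfp, BFA_PORT_SPEED_8GBPS, drv_speed_done, NULL);
 *	switch (rc) {
 *	case BFA_STATUS_SFP_NOT_READY:
 *		break;			// wait: drv_speed_done() runs later
 *	default:
 *		drv_speed_done(NULL, rc);	// immediate answer
 *	}
 */
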
3953/*
3954 *      Flash module specific
3955 */
3956
3957/*
3958 * The flash DMA buffer should be big enough to hold both the MFG block
3959 * and the ASIC block (64k) at the same time, and should be 2k aligned
3960 * to avoid a write segment crossing a sector boundary.
3961 */
3962#define BFA_FLASH_SEG_SZ        2048
3963#define BFA_FLASH_DMA_BUF_SZ    \
3964        BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3965
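/*
 * Worked out, assuming a bfa_mfg_block_s of 256 bytes (illustrative only):
 * 0x10000 + 256 = 65792 bytes, rounded up to the 2048-byte segment size,
 * gives BFA_FLASH_DMA_BUF_SZ = 67584 (33 segments), so one buffer can
 * stage the ASIC block and the MFG block together without any write
 * segment straddling a sector.
 */
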
3966static void
3967bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
3968                        int inst, int type)
3969{
3970        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
3971        struct bfa_aen_entry_s  *aen_entry;
3972
3973        bfad_get_aen_entry(bfad, aen_entry);
3974        if (!aen_entry)
3975                return;
3976
3977        aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3978        aen_entry->aen_data.audit.partition_inst = inst;
3979        aen_entry->aen_data.audit.partition_type = type;
3980
3981        /* Send the AEN notification */
3982        bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
3983                                  BFA_AEN_CAT_AUDIT, event);
3984}
3985
3986static void
3987bfa_flash_cb(struct bfa_flash_s *flash)
3988{
3989        flash->op_busy = 0;
3990        if (flash->cbfn)
3991                flash->cbfn(flash->cbarg, flash->status);
3992}
3993
3994static void
3995bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
3996{
3997        struct bfa_flash_s      *flash = cbarg;
3998
3999        bfa_trc(flash, event);
4000        switch (event) {
4001        case BFA_IOC_E_DISABLED:
4002        case BFA_IOC_E_FAILED:
4003                if (flash->op_busy) {
4004                        flash->status = BFA_STATUS_IOC_FAILURE;
4005                        flash->cbfn(flash->cbarg, flash->status);
4006                        flash->op_busy = 0;
4007                }
4008                break;
4009
4010        default:
4011                break;
4012        }
4013}
4014
4015/*
4016 * Send flash attribute query request.
4017 *
4018 * @param[in] cbarg - callback argument
4019 */
4020static void
4021bfa_flash_query_send(void *cbarg)
4022{
4023        struct bfa_flash_s *flash = cbarg;
4024        struct bfi_flash_query_req_s *msg =
4025                        (struct bfi_flash_query_req_s *) flash->mb.msg;
4026
4027        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4028                bfa_ioc_portid(flash->ioc));
4029        bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4030                flash->dbuf_pa);
4031        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4032}
4033
4034/*
4035 * Send flash write request.
4036 *
4037 * @param[in] flash - flash structure
4038 */
4039static void
4040bfa_flash_write_send(struct bfa_flash_s *flash)
4041{
4042        struct bfi_flash_write_req_s *msg =
4043                        (struct bfi_flash_write_req_s *) flash->mb.msg;
4044        u32     len;
4045
4046        msg->type = cpu_to_be32(flash->type);
4047        msg->instance = flash->instance;
4048        msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4049        len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4050                flash->residue : BFA_FLASH_DMA_BUF_SZ;
4051        msg->length = cpu_to_be32(len);
4052
4053        /* indicate if it's the last msg of the whole write operation */
4054        msg->last = (len == flash->residue) ? 1 : 0;
4055
4056        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4057                        bfa_ioc_portid(flash->ioc));
4058        bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4059        memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4060        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4061
4062        flash->residue -= len;
4063        flash->offset += len;
4064}
4065
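/*
 * The residue/offset pair above chops one logical write into DMA-buffer
 * sized mailbox requests. For example, a hypothetical 100 KiB update with
 * a 67584-byte buffer (the size worked out earlier) proceeds as:
 *
 *	residue = 102400, offset = 0      -> send 67584 bytes, last = 0
 *	residue =  34816, offset = 67584  -> send 34816 bytes, last = 1
 *
 * Each BFI_FLASH_I2H_WRITE_RSP with status OK triggers the next chunk from
 * bfa_flash_intr() until residue reaches zero.
 */
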
4066/*
4067 * Send flash read request.
4068 *
4069 * @param[in] cbarg - callback argument
4070 */
4071static void
4072bfa_flash_read_send(void *cbarg)
4073{
4074        struct bfa_flash_s *flash = cbarg;
4075        struct bfi_flash_read_req_s *msg =
4076                        (struct bfi_flash_read_req_s *) flash->mb.msg;
4077        u32     len;
4078
4079        msg->type = cpu_to_be32(flash->type);
4080        msg->instance = flash->instance;
4081        msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4082        len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4083                        flash->residue : BFA_FLASH_DMA_BUF_SZ;
4084        msg->length = cpu_to_be32(len);
4085        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4086                bfa_ioc_portid(flash->ioc));
4087        bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4088        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4089}
4090
4091/*
4092 * Send flash erase request.
4093 *
4094 * @param[in] cbarg - callback argument
4095 */
4096static void
4097bfa_flash_erase_send(void *cbarg)
4098{
4099        struct bfa_flash_s *flash = cbarg;
4100        struct bfi_flash_erase_req_s *msg =
4101                        (struct bfi_flash_erase_req_s *) flash->mb.msg;
4102
4103        msg->type = cpu_to_be32(flash->type);
4104        msg->instance = flash->instance;
4105        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4106                        bfa_ioc_portid(flash->ioc));
4107        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4108}
4109
4110/*
4111 * Process flash response messages upon receiving interrupts.
4112 *
4113 * @param[in] flasharg - flash structure
4114 * @param[in] msg - message structure
4115 */
4116static void
4117bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4118{
4119        struct bfa_flash_s *flash = flasharg;
4120        u32     status;
4121
4122        union {
4123                struct bfi_flash_query_rsp_s *query;
4124                struct bfi_flash_erase_rsp_s *erase;
4125                struct bfi_flash_write_rsp_s *write;
4126                struct bfi_flash_read_rsp_s *read;
4127                struct bfi_flash_event_s *event;
4128                struct bfi_mbmsg_s   *msg;
4129        } m;
4130
4131        m.msg = msg;
4132        bfa_trc(flash, msg->mh.msg_id);
4133
4134        if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4135                /* receiving response after ioc failure */
4136                bfa_trc(flash, 0x9999);
4137                return;
4138        }
4139
4140        switch (msg->mh.msg_id) {
4141        case BFI_FLASH_I2H_QUERY_RSP:
4142                status = be32_to_cpu(m.query->status);
4143                bfa_trc(flash, status);
4144                if (status == BFA_STATUS_OK) {
4145                        u32     i;
4146                        struct bfa_flash_attr_s *attr, *f;
4147
4148                        attr = (struct bfa_flash_attr_s *) flash->ubuf;
4149                        f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4150                        attr->status = be32_to_cpu(f->status);
4151                        attr->npart = be32_to_cpu(f->npart);
4152                        bfa_trc(flash, attr->status);
4153                        bfa_trc(flash, attr->npart);
4154                        for (i = 0; i < attr->npart; i++) {
4155                                attr->part[i].part_type =
4156                                        be32_to_cpu(f->part[i].part_type);
4157                                attr->part[i].part_instance =
4158                                        be32_to_cpu(f->part[i].part_instance);
4159                                attr->part[i].part_off =
4160                                        be32_to_cpu(f->part[i].part_off);
4161                                attr->part[i].part_size =
4162                                        be32_to_cpu(f->part[i].part_size);
4163                                attr->part[i].part_len =
4164                                        be32_to_cpu(f->part[i].part_len);
4165                                attr->part[i].part_status =
4166                                        be32_to_cpu(f->part[i].part_status);
4167                        }
4168                }
4169                flash->status = status;
4170                bfa_flash_cb(flash);
4171                break;
4172        case BFI_FLASH_I2H_ERASE_RSP:
4173                status = be32_to_cpu(m.erase->status);
4174                bfa_trc(flash, status);
4175                flash->status = status;
4176                bfa_flash_cb(flash);
4177                break;
4178        case BFI_FLASH_I2H_WRITE_RSP:
4179                status = be32_to_cpu(m.write->status);
4180                bfa_trc(flash, status);
4181                if (status != BFA_STATUS_OK || flash->residue == 0) {
4182                        flash->status = status;
4183                        bfa_flash_cb(flash);
4184                } else {
4185                        bfa_trc(flash, flash->offset);
4186                        bfa_flash_write_send(flash);
4187                }
4188                break;
4189        case BFI_FLASH_I2H_READ_RSP:
4190                status = be32_to_cpu(m.read->status);
4191                bfa_trc(flash, status);
4192                if (status != BFA_STATUS_OK) {
4193                        flash->status = status;
4194                        bfa_flash_cb(flash);
4195                } else {
4196                        u32 len = be32_to_cpu(m.read->length);
4197                        bfa_trc(flash, flash->offset);
4198                        bfa_trc(flash, len);
4199                        memcpy(flash->ubuf + flash->offset,
4200                                flash->dbuf_kva, len);
4201                        flash->residue -= len;
4202                        flash->offset += len;
4203                        if (flash->residue == 0) {
4204                                flash->status = status;
4205                                bfa_flash_cb(flash);
4206                        } else
4207                                bfa_flash_read_send(flash);
4208                }
4209                break;
4210        case BFI_FLASH_I2H_BOOT_VER_RSP:
4211                break;
4212        case BFI_FLASH_I2H_EVENT:
4213                status = be32_to_cpu(m.event->status);
4214                bfa_trc(flash, status);
4215                if (status == BFA_STATUS_BAD_FWCFG)
4216                        bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4217                else if (status == BFA_STATUS_INVALID_VENDOR) {
4218                        u32 param;
4219                        param = be32_to_cpu(m.event->param);
4220                        bfa_trc(flash, param);
4221                        bfa_ioc_aen_post(flash->ioc,
4222                                BFA_IOC_AEN_INVALID_VENDOR);
4223                }
4224                break;
4225
4226        default:
4227                WARN_ON(1);
4228        }
4229}
4230
4231/*
4232 * Flash memory info API.
4233 *
4234 * @param[in] mincfg - minimal cfg variable
4235 */
4236u32
4237bfa_flash_meminfo(bfa_boolean_t mincfg)
4238{
4239        /* min driver doesn't need flash */
4240        if (mincfg)
4241                return 0;
4242        return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4243}
4244
4245/*
4246 * Flash attach API.
4247 *
4248 * @param[in] flash - flash structure
4249 * @param[in] ioc  - ioc structure
4250 * @param[in] dev  - device structure
4251 * @param[in] trcmod - trace module
4252 * @param[in] logmod - log module
4253 */
4254void
4255bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4256                struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4257{
4258        flash->ioc = ioc;
4259        flash->trcmod = trcmod;
4260        flash->cbfn = NULL;
4261        flash->cbarg = NULL;
4262        flash->op_busy = 0;
4263
4264        bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4265        bfa_q_qe_init(&flash->ioc_notify);
4266        bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4267        list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4268
4269        /* min driver doesn't need flash */
4270        if (mincfg) {
4271                flash->dbuf_kva = NULL;
4272                flash->dbuf_pa = 0;
4273        }
4274}
4275
4276/*
4277 * Claim memory for flash
4278 *
4279 * @param[in] flash - flash structure
4280 * @param[in] dm_kva - pointer to virtual memory address
4281 * @param[in] dm_pa - physical memory address
4282 * @param[in] mincfg - minimal cfg variable
4283 */
4284void
4285bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4286                bfa_boolean_t mincfg)
4287{
4288        if (mincfg)
4289                return;
4290
4291        flash->dbuf_kva = dm_kva;
4292        flash->dbuf_pa = dm_pa;
4293        memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4294        dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4295        dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4296}
4297
4298/*
4299 * Get flash attribute.
4300 *
4301 * @param[in] flash - flash structure
4302 * @param[in] attr - flash attribute structure
4303 * @param[in] cbfn - callback function
4304 * @param[in] cbarg - callback argument
4305 *
4306 * Return status.
4307 */
4308bfa_status_t
4309bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4310                bfa_cb_flash_t cbfn, void *cbarg)
4311{
4312        bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4313
4314        if (!bfa_ioc_is_operational(flash->ioc))
4315                return BFA_STATUS_IOC_NON_OP;
4316
4317        if (flash->op_busy) {
4318                bfa_trc(flash, flash->op_busy);
4319                return BFA_STATUS_DEVBUSY;
4320        }
4321
4322        flash->op_busy = 1;
4323        flash->cbfn = cbfn;
4324        flash->cbarg = cbarg;
4325        flash->ubuf = (u8 *) attr;
4326        bfa_flash_query_send(flash);
4327
4328        return BFA_STATUS_OK;
4329}
4330
4331/*
4332 * Erase flash partition.
4333 *
4334 * @param[in] flash - flash structure
4335 * @param[in] type - flash partition type
4336 * @param[in] instance - flash partition instance
4337 * @param[in] cbfn - callback function
4338 * @param[in] cbarg - callback argument
4339 *
4340 * Return status.
4341 */
4342bfa_status_t
4343bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4344                u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4345{
4346        bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4347        bfa_trc(flash, type);
4348        bfa_trc(flash, instance);
4349
4350        if (!bfa_ioc_is_operational(flash->ioc))
4351                return BFA_STATUS_IOC_NON_OP;
4352
4353        if (flash->op_busy) {
4354                bfa_trc(flash, flash->op_busy);
4355                return BFA_STATUS_DEVBUSY;
4356        }
4357
4358        flash->op_busy = 1;
4359        flash->cbfn = cbfn;
4360        flash->cbarg = cbarg;
4361        flash->type = type;
4362        flash->instance = instance;
4363
4364        bfa_flash_erase_send(flash);
4365        bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4366                                instance, type);
4367        return BFA_STATUS_OK;
4368}
4369
4370/*
4371 * Update flash partition.
4372 *
4373 * @param[in] flash - flash structure
4374 * @param[in] type - flash partition type
4375 * @param[in] instance - flash partition instance
4376 * @param[in] buf - update data buffer
4377 * @param[in] len - data buffer length
4378 * @param[in] offset - offset relative to the partition starting address
4379 * @param[in] cbfn - callback function
4380 * @param[in] cbarg - callback argument
4381 *
4382 * Return status.
4383 */
4384bfa_status_t
4385bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4386                u8 instance, void *buf, u32 len, u32 offset,
4387                bfa_cb_flash_t cbfn, void *cbarg)
4388{
4389        bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4390        bfa_trc(flash, type);
4391        bfa_trc(flash, instance);
4392        bfa_trc(flash, len);
4393        bfa_trc(flash, offset);
4394
4395        if (!bfa_ioc_is_operational(flash->ioc))
4396                return BFA_STATUS_IOC_NON_OP;
4397
4398        /*
4399         * 'len' must be on a word (4-byte) boundary
4400         * 'offset' must be on a sector (16kb) boundary
4401         */
4402        if (!len || (len & 0x03) || (offset & 0x00003FFF))
4403                return BFA_STATUS_FLASH_BAD_LEN;
4404
4405        if (type == BFA_FLASH_PART_MFG)
4406                return BFA_STATUS_EINVAL;
4407
4408        if (flash->op_busy) {
4409                bfa_trc(flash, flash->op_busy);
4410                return BFA_STATUS_DEVBUSY;
4411        }
4412
4413        flash->op_busy = 1;
4414        flash->cbfn = cbfn;
4415        flash->cbarg = cbarg;
4416        flash->type = type;
4417        flash->instance = instance;
4418        flash->residue = len;
4419        flash->offset = 0;
4420        flash->addr_off = offset;
4421        flash->ubuf = buf;
4422
4423        bfa_flash_write_send(flash);
4424        return BFA_STATUS_OK;
4425}
4426
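/*
 * A hedged bfa_flash_update_part() usage sketch. The image buffer, its
 * length and the completion handler below are hypothetical; note the API's
 * own preconditions checked above: len word-aligned, offset sector (16kb)
 * aligned, and the MFG partition refused outright.
 *
 *	static void drv_flash_done(void *cbarg, bfa_status_t status)
 *	{
 *		// hypothetical completion for the whole multi-chunk write
 *	}
 *
 *	rc = bfa_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *				   img_buf, img_len, 0,
 *				   drv_flash_done, NULL);
 *	// rc: FLASH_BAD_LEN / EINVAL / DEVBUSY now, or OK followed by
 *	// drv_flash_done() once the final chunk's write rsp arrives
 */
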
4427/*
4428 * Read flash partition.
4429 *
4430 * @param[in] flash - flash structure
4431 * @param[in] type - flash partition type
4432 * @param[in] instance - flash partition instance
4433 * @param[in] buf - read data buffer
4434 * @param[in] len - data buffer length
4435 * @param[in] offset - offset relative to the partition starting address
4436 * @param[in] cbfn - callback function
4437 * @param[in] cbarg - callback argument
4438 *
4439 * Return status.
4440 */
4441bfa_status_t
4442bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4443                u8 instance, void *buf, u32 len, u32 offset,
4444                bfa_cb_flash_t cbfn, void *cbarg)
4445{
4446        bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4447        bfa_trc(flash, type);
4448        bfa_trc(flash, instance);
4449        bfa_trc(flash, len);
4450        bfa_trc(flash, offset);
4451
4452        if (!bfa_ioc_is_operational(flash->ioc))
4453                return BFA_STATUS_IOC_NON_OP;
4454
4455        /*
4456         * 'len' must be on a word (4-byte) boundary
4457         * 'offset' must be on a sector (16kb) boundary
4458         */
4459        if (!len || (len & 0x03) || (offset & 0x00003FFF))
4460                return BFA_STATUS_FLASH_BAD_LEN;
4461
4462        if (flash->op_busy) {
4463                bfa_trc(flash, flash->op_busy);
4464                return BFA_STATUS_DEVBUSY;
4465        }
4466
4467        flash->op_busy = 1;
4468        flash->cbfn = cbfn;
4469        flash->cbarg = cbarg;
4470        flash->type = type;
4471        flash->instance = instance;
4472        flash->residue = len;
4473        flash->offset = 0;
4474        flash->addr_off = offset;
4475        flash->ubuf = buf;
4476        bfa_flash_read_send(flash);
4477
4478        return BFA_STATUS_OK;
4479}
4480
4481/*
4482 *      DIAG module specific
4483 */
4484
4485#define BFA_DIAG_MEMTEST_TOV    50000   /* memtest timeout in msec */
4486#define CT2_BFA_DIAG_MEMTEST_TOV        (9*30*1000)  /* 4.5 min */
4487
4488/* IOC event handler */
4489static void
4490bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4491{
4492        struct bfa_diag_s *diag = diag_arg;
4493
4494        bfa_trc(diag, event);
4495        bfa_trc(diag, diag->block);
4496        bfa_trc(diag, diag->fwping.lock);
4497        bfa_trc(diag, diag->tsensor.lock);
4498
4499        switch (event) {
4500        case BFA_IOC_E_DISABLED:
4501        case BFA_IOC_E_FAILED:
4502                if (diag->fwping.lock) {
4503                        diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4504                        diag->fwping.cbfn(diag->fwping.cbarg,
4505                                        diag->fwping.status);
4506                        diag->fwping.lock = 0;
4507                }
4508
4509                if (diag->tsensor.lock) {
4510                        diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4511                        diag->tsensor.cbfn(diag->tsensor.cbarg,
4512                                           diag->tsensor.status);
4513                        diag->tsensor.lock = 0;
4514                }
4515
4516                if (diag->block) {
4517                        if (diag->timer_active) {
4518                                bfa_timer_stop(&diag->timer);
4519                                diag->timer_active = 0;
4520                        }
4521
4522                        diag->status = BFA_STATUS_IOC_FAILURE;
4523                        diag->cbfn(diag->cbarg, diag->status);
4524                        diag->block = 0;
4525                }
4526                break;
4527
4528        default:
4529                break;
4530        }
4531}
4532
4533static void
4534bfa_diag_memtest_done(void *cbarg)
4535{
4536        struct bfa_diag_s *diag = cbarg;
4537        struct bfa_ioc_s  *ioc = diag->ioc;
4538        struct bfa_diag_memtest_result *res = diag->result;
4539        u32     loff = BFI_BOOT_MEMTEST_RES_ADDR;
4540        u32     pgnum, pgoff, i;
4541
4542        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4543        pgoff = PSS_SMEM_PGOFF(loff);
4544
4545        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4546
4547        for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4548                         sizeof(u32)); i++) {
4549                /* read test result from smem */
4550                *((u32 *) res + i) =
4551                        bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4552                loff += sizeof(u32);
4553        }
4554
4555        /* Reset IOC fwstates to BFI_IOC_UNINIT */
4556        bfa_ioc_reset_fwstate(ioc);
4557
4558        res->status = swab32(res->status);
4559        bfa_trc(diag, res->status);
4560
4561        if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4562                diag->status = BFA_STATUS_OK;
4563        else {
4564                diag->status = BFA_STATUS_MEMTEST_FAILED;
4565                res->addr = swab32(res->addr);
4566                res->exp = swab32(res->exp);
4567                res->act = swab32(res->act);
4568                res->err_status = swab32(res->err_status);
4569                res->err_status1 = swab32(res->err_status1);
4570                res->err_addr = swab32(res->err_addr);
4571                bfa_trc(diag, res->addr);
4572                bfa_trc(diag, res->exp);
4573                bfa_trc(diag, res->act);
4574                bfa_trc(diag, res->err_status);
4575                bfa_trc(diag, res->err_status1);
4576                bfa_trc(diag, res->err_addr);
4577        }
4578        diag->timer_active = 0;
4579        diag->cbfn(diag->cbarg, diag->status);
4580        diag->block = 0;
4581}
4582
4583/*
4584 * Firmware ping
4585 */
4586
4587/*
4588 * Perform DMA test directly
4589 */
4590static void
4591diag_fwping_send(struct bfa_diag_s *diag)
4592{
4593        struct bfi_diag_fwping_req_s *fwping_req;
4594        u32     i;
4595
4596        bfa_trc(diag, diag->fwping.dbuf_pa);
4597
4598        /* fill DMA area with pattern */
4599        for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4600                *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4601
4602        /* Fill mbox msg */
4603        fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4604
4605        /* Setup SG list */
4606        bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4607                        diag->fwping.dbuf_pa);
4608        /* Set up dma count */
4609        fwping_req->count = cpu_to_be32(diag->fwping.count);
4610        /* Set up data pattern */
4611        fwping_req->data = diag->fwping.data;
4612
4613        /* build host command */
4614        bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4615                bfa_ioc_portid(diag->ioc));
4616
4617        /* send mbox cmd */
4618        bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4619}
4620
4621static void
4622diag_fwping_comp(struct bfa_diag_s *diag,
4623                 struct bfi_diag_fwping_rsp_s *diag_rsp)
4624{
4625        u32     rsp_data = diag_rsp->data;
4626        u8      rsp_dma_status = diag_rsp->dma_status;
4627
4628        bfa_trc(diag, rsp_data);
4629        bfa_trc(diag, rsp_dma_status);
4630
4631        if (rsp_dma_status == BFA_STATUS_OK) {
4632                u32     i, pat;
4633                pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4634                        diag->fwping.data;
4635                /* Check mbox data */
4636                if (diag->fwping.data != rsp_data) {
4637                        bfa_trc(diag, rsp_data);
4638                        diag->fwping.result->dmastatus =
4639                                        BFA_STATUS_DATACORRUPTED;
4640                        diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4641                        diag->fwping.cbfn(diag->fwping.cbarg,
4642                                        diag->fwping.status);
4643                        diag->fwping.lock = 0;
4644                        return;
4645                }
4646                /* Check dma pattern */
4647                for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4648                        if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4649                                bfa_trc(diag, i);
4650                                bfa_trc(diag, pat);
4651                                bfa_trc(diag,
4652                                        *((u32 *)diag->fwping.dbuf_kva + i));
4653                                diag->fwping.result->dmastatus =
4654                                                BFA_STATUS_DATACORRUPTED;
4655                                diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4656                                diag->fwping.cbfn(diag->fwping.cbarg,
4657                                                diag->fwping.status);
4658                                diag->fwping.lock = 0;
4659                                return;
4660                        }
4661                }
4662                diag->fwping.result->dmastatus = BFA_STATUS_OK;
4663                diag->fwping.status = BFA_STATUS_OK;
4664                diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4665                diag->fwping.lock = 0;
4666        } else {
4667                diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4668                diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4669                diag->fwping.lock = 0;
4670        }
4671}
4672
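/*
 * The pattern check above relies on the firmware inverting the DMA area on
 * every other ping, so a stuck buffer cannot pass. With data = 0xA5A5A5A5:
 *
 *	count even -> expect 0xA5A5A5A5 in every word
 *	count odd  -> expect ~0xA5A5A5A5 = 0x5A5A5A5A
 *
 * Any word that misses the expected pattern (or a mismatched mbox echo)
 * completes the ping with BFA_STATUS_DATACORRUPTED.
 */
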
4673/*
4674 * Temperature Sensor
4675 */
4676
4677static void
4678diag_tempsensor_send(struct bfa_diag_s *diag)
4679{
4680        struct bfi_diag_ts_req_s *msg;
4681
4682        msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4683        bfa_trc(diag, msg->temp);
4684        /* build host command */
4685        bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4686                bfa_ioc_portid(diag->ioc));
4687        /* send mbox cmd */
4688        bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4689}
4690
4691static void
4692diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4693{
4694        if (!diag->tsensor.lock) {
4695                /* receiving response after ioc failure */
4696                bfa_trc(diag, diag->tsensor.lock);
4697                return;
4698        }
4699
4700        /*
4701         * ASIC junction tempsensor is a reg read operation
4702         * it will always return OK
4703         */
4704        diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4705        diag->tsensor.temp->ts_junc = rsp->ts_junc;
4706        diag->tsensor.temp->ts_brd = rsp->ts_brd;
4707
4708        if (rsp->ts_brd) {
4709                /* tsensor.temp->status is brd_temp status */
4710                diag->tsensor.temp->status = rsp->status;
4711                if (rsp->status == BFA_STATUS_OK) {
4712                        diag->tsensor.temp->brd_temp =
4713                                be16_to_cpu(rsp->brd_temp);
4714                } else
4715                        diag->tsensor.temp->brd_temp = 0;
4716        }
4717
4718        bfa_trc(diag, rsp->status);
4719        bfa_trc(diag, rsp->ts_junc);
4720        bfa_trc(diag, rsp->temp);
4721        bfa_trc(diag, rsp->ts_brd);
4722        bfa_trc(diag, rsp->brd_temp);
4723
4724        /* tsensor status is always good because we always have junction temp */
4725        diag->tsensor.status = BFA_STATUS_OK;
4726        diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4727        diag->tsensor.lock = 0;
4728}
4729
4730/*
4731 *      LED Test command
4732 */
4733static void
4734diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4735{
4736        struct bfi_diag_ledtest_req_s  *msg;
4737
4738        msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4739        /* build host command */
4740        bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4741                        bfa_ioc_portid(diag->ioc));
4742
4743        /*
4744         * convert the freq from N blinks per 10 sec to
4745         * crossbow ontime value. We do it here because division is needed.
4746         */
4747        if (ledtest->freq)
4748                ledtest->freq = 500 / ledtest->freq;
4749
4750        if (ledtest->freq == 0)
4751                ledtest->freq = 1;
4752
4753        bfa_trc(diag, ledtest->freq);
4754        /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4755        msg->cmd = (u8) ledtest->cmd;
4756        msg->color = (u8) ledtest->color;
4757        msg->portid = bfa_ioc_portid(diag->ioc);
4758        msg->led = ledtest->led;
4759        msg->freq = cpu_to_be16(ledtest->freq);
4760
4761        /* send mbox cmd */
4762        bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4763}
4764
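/*
 * The frequency conversion above maps "N blinks per 10 seconds" onto the
 * crossbow on-time value 500/N, clamped to a minimum of 1. For example:
 *
 *	requested freq = 10  ->  500/10 = 50 sent to firmware
 *	requested freq = 600 ->  500/600 = 0, clamped to 1
 */
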
4765static void
4766diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4767{
4768        bfa_trc(diag, diag->ledtest.lock);
4769        diag->ledtest.lock = BFA_FALSE;
4770        /* no bfa_cb_queue is needed because driver is not waiting */
4771}
4772
4773/*
4774 * Port beaconing
4775 */
4776static void
4777diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4778{
4779        struct bfi_diag_portbeacon_req_s *msg;
4780
4781        msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4782        /* build host command */
4783        bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4784                bfa_ioc_portid(diag->ioc));
4785        msg->beacon = beacon;
4786        msg->period = cpu_to_be32(sec);
4787        /* send mbox cmd */
4788        bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4789}
4790
4791static void
4792diag_portbeacon_comp(struct bfa_diag_s *diag)
4793{
4794        bfa_trc(diag, diag->beacon.state);
4795        diag->beacon.state = BFA_FALSE;
4796        if (diag->cbfn_beacon)
4797                diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4798}
4799
4800/*
4801 *      Diag hmbox handler
4802 */
4803void
4804bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4805{
4806        struct bfa_diag_s *diag = diagarg;
4807
4808        switch (msg->mh.msg_id) {
4809        case BFI_DIAG_I2H_PORTBEACON:
4810                diag_portbeacon_comp(diag);
4811                break;
4812        case BFI_DIAG_I2H_FWPING:
4813                diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4814                break;
4815        case BFI_DIAG_I2H_TEMPSENSOR:
4816                diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4817                break;
4818        case BFI_DIAG_I2H_LEDTEST:
4819                diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4820                break;
4821        default:
4822                bfa_trc(diag, msg->mh.msg_id);
4823                WARN_ON(1);
4824        }
4825}
4826
4827/*
4828 * Gen RAM Test
4829 *
4830 *   @param[in] *diag           - diag data struct
4831 *   @param[in] *memtest        - mem test params input from upper layer
4832 *   @param[in] pattern         - mem test pattern
4833 *   @param[in] *result         - mem test result
4834 *   @param[in] cbfn            - mem test callback function
4835 *   @param[in] cbarg           - callback function arg
4836 *
4837 *   @param[out]
4838 */
4839bfa_status_t
4840bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4841                u32 pattern, struct bfa_diag_memtest_result *result,
4842                bfa_cb_diag_t cbfn, void *cbarg)
4843{
4844        u32     memtest_tov;
4845
4846        bfa_trc(diag, pattern);
4847
4848        if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4849                return BFA_STATUS_ADAPTER_ENABLED;
4850
4851        /* check to see if there is another destructive diag cmd running */
4852        if (diag->block) {
4853                bfa_trc(diag, diag->block);
4854                return BFA_STATUS_DEVBUSY;
4855        } else
4856                diag->block = 1;
4857
4858        diag->result = result;
4859        diag->cbfn = cbfn;
4860        diag->cbarg = cbarg;
4861
4862        /* download memtest code and take LPU0 out of reset */
4863        bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4864
4865        memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
4866                       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
4867        bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4868                        bfa_diag_memtest_done, diag, memtest_tov);
4869        diag->timer_active = 1;
4870        return BFA_STATUS_OK;
4871}
4872
4873/*
4874 * DIAG firmware ping command
4875 *
4876 *   @param[in] *diag           - diag data struct
4877 *   @param[in] cnt             - dma loop count for testing PCIE
4878 *   @param[in] data            - data pattern to pass in fw
4879 *   @param[in] *result         - pointer to bfa_diag_fwping_result_t data struct
4880 *   @param[in] cbfn            - callback function
4881 *   @param[in] *cbarg          - callback function arg
4882 *
4883 *   @param[out]
4884 */
4885bfa_status_t
4886bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4887                struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4888                void *cbarg)
4889{
4890        bfa_trc(diag, cnt);
4891        bfa_trc(diag, data);
4892
4893        if (!bfa_ioc_is_operational(diag->ioc))
4894                return BFA_STATUS_IOC_NON_OP;
4895
4896        if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4897            ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4898                return BFA_STATUS_CMD_NOTSUPP;
4899
4900        /* check to see if there is another destructive diag cmd running */
4901        if (diag->block || diag->fwping.lock) {
4902                bfa_trc(diag, diag->block);
4903                bfa_trc(diag, diag->fwping.lock);
4904                return BFA_STATUS_DEVBUSY;
4905        }
4906
4907        /* Initialization */
4908        diag->fwping.lock = 1;
4909        diag->fwping.cbfn = cbfn;
4910        diag->fwping.cbarg = cbarg;
4911        diag->fwping.result = result;
4912        diag->fwping.data = data;
4913        diag->fwping.count = cnt;
4914
4915        /* Init test results */
4916        diag->fwping.result->data = 0;
4917        diag->fwping.result->status = BFA_STATUS_OK;
4918
4919        /* kick off the first ping */
4920        diag_fwping_send(diag);
4921        return BFA_STATUS_OK;
4922}
4923
4924/*
4925 * Read Temperature Sensor
4926 *
4927 *   @param[in] *diag           - diag data struct
4928 *   @param[in] *result         - pointer to bfa_diag_temp_t data struct
4929 *   @param[in] cbfn            - callback function
4930 *   @param[in] *cbarg          - callback function arg
4931 *
4932 *   @param[out]
4933 */
4934bfa_status_t
4935bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4936                struct bfa_diag_results_tempsensor_s *result,
4937                bfa_cb_diag_t cbfn, void *cbarg)
4938{
4939        /* check to see if there is a destructive diag cmd running */
4940        if (diag->block || diag->tsensor.lock) {
4941                bfa_trc(diag, diag->block);
4942                bfa_trc(diag, diag->tsensor.lock);
4943                return BFA_STATUS_DEVBUSY;
4944        }
4945
4946        if (!bfa_ioc_is_operational(diag->ioc))
4947                return BFA_STATUS_IOC_NON_OP;
4948
4949        /* Init diag mod params */
4950        diag->tsensor.lock = 1;
4951        diag->tsensor.temp = result;
4952        diag->tsensor.cbfn = cbfn;
4953        diag->tsensor.cbarg = cbarg;
4954        diag->tsensor.status = BFA_STATUS_OK;
4955
4956        /* Send msg to fw */
4957        diag_tempsensor_send(diag);
4958
4959        return BFA_STATUS_OK;
4960}
4961
4962/*
4963 * LED Test command
4964 *
4965 *   @param[in] *diag           - diag data struct
4966 *   @param[in] *ledtest        - pointer to ledtest data structure
4967 *
4968 *   @param[out]
4969 */
4970bfa_status_t
4971bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4972{
4973        bfa_trc(diag, ledtest->cmd);
4974
4975        if (!bfa_ioc_is_operational(diag->ioc))
4976                return BFA_STATUS_IOC_NON_OP;
4977
4978        if (diag->beacon.state)
4979                return BFA_STATUS_BEACON_ON;
4980
4981        if (diag->ledtest.lock)
4982                return BFA_STATUS_LEDTEST_OP;
4983
4984        /* Send msg to fw */
4985        diag->ledtest.lock = BFA_TRUE;
4986        diag_ledtest_send(diag, ledtest);
4987
4988        return BFA_STATUS_OK;
4989}
4990
4991/*
4992 * Port beaconing command
4993 *
4994 *   @param[in] *diag           - diag data struct
4995 *   @param[in] beacon          - port beaconing 1:ON   0:OFF
4996 *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
4997 *   @param[in] sec             - beaconing duration in seconds
4998 *
4999 *   @param[out]
5000 */
5001bfa_status_t
5002bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5003                bfa_boolean_t link_e2e_beacon, uint32_t sec)
5004{
5005        bfa_trc(diag, beacon);
5006        bfa_trc(diag, link_e2e_beacon);
5007        bfa_trc(diag, sec);
5008
5009        if (!bfa_ioc_is_operational(diag->ioc))
5010                return BFA_STATUS_IOC_NON_OP;
5011
5012        if (diag->ledtest.lock)
5013                return BFA_STATUS_LEDTEST_OP;
5014
5015        if (diag->beacon.state && beacon)       /* beacon already on */
5016                return BFA_STATUS_BEACON_ON;
5017
5018        diag->beacon.state      = beacon;
5019        diag->beacon.link_e2e   = link_e2e_beacon;
5020        if (diag->cbfn_beacon)
5021                diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5022
5023        /* Send msg to fw */
5024        diag_portbeacon_send(diag, beacon, sec);
5025
5026        return BFA_STATUS_OK;
5027}
5028
5029/*
5030 * Return DMA memory needed by diag module.
5031 */
5032u32
5033bfa_diag_meminfo(void)
5034{
5035        return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5036}
5037
5038/*
5039 *      Attach virtual and physical memory for Diag.
5040 */
5041void
5042bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5043        bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5044{
5045        diag->dev = dev;
5046        diag->ioc = ioc;
5047        diag->trcmod = trcmod;
5048
5049        diag->block = 0;
5050        diag->cbfn = NULL;
5051        diag->cbarg = NULL;
5052        diag->result = NULL;
5053        diag->cbfn_beacon = cbfn_beacon;
5054
5055        bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5056        bfa_q_qe_init(&diag->ioc_notify);
5057        bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5058        list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5059}
5060
5061void
5062bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5063{
5064        diag->fwping.dbuf_kva = dm_kva;
5065        diag->fwping.dbuf_pa = dm_pa;
5066        memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5067}
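
/*
 * Example (editor's sketch, not part of the driver): the meminfo/
 * memclaim pair is used at setup time -- reserve bfa_diag_meminfo()
 * bytes of DMA-able memory, then hand it to the module. dma_kva and
 * dma_pa are hypothetical caller-side cursors.
 */
#if 0
static void
example_diag_setup(struct bfa_diag_s *diag, u8 *dma_kva, u64 dma_pa)
{
        /* dma_kva/dma_pa must cover bfa_diag_meminfo() bytes */
        bfa_diag_memclaim(diag, dma_kva, dma_pa);
}
#endif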
5068
5069/*
5070 *      PHY module specific
5071 */
5072#define BFA_PHY_DMA_BUF_SZ      0x02000         /* 8k dma buffer */
5073#define BFA_PHY_LOCK_STATUS     0x018878        /* phy semaphore status reg */
5074
5075static void
5076bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5077{
5078        int i, m = sz >> 2;
5079
5080        for (i = 0; i < m; i++)
5081                obuf[i] = be32_to_cpu(ibuf[i]);
5082}
5083
5084static bfa_boolean_t
5085bfa_phy_present(struct bfa_phy_s *phy)
5086{
5087        return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5088}
5089
5090static void
5091bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5092{
5093        struct bfa_phy_s *phy = cbarg;
5094
5095        bfa_trc(phy, event);
5096
5097        switch (event) {
5098        case BFA_IOC_E_DISABLED:
5099        case BFA_IOC_E_FAILED:
5100                if (phy->op_busy) {
5101                        phy->status = BFA_STATUS_IOC_FAILURE;
5102                        phy->cbfn(phy->cbarg, phy->status);
5103                        phy->op_busy = 0;
5104                }
5105                break;
5106
5107        default:
5108                break;
5109        }
5110}
5111
5112/*
5113 * Send phy attribute query request.
5114 *
5115 * @param[in] cbarg - callback argument
5116 */
5117static void
5118bfa_phy_query_send(void *cbarg)
5119{
5120        struct bfa_phy_s *phy = cbarg;
5121        struct bfi_phy_query_req_s *msg =
5122                        (struct bfi_phy_query_req_s *) phy->mb.msg;
5123
5124        msg->instance = phy->instance;
5125        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5126                bfa_ioc_portid(phy->ioc));
5127        bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5128        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5129}
5130
5131/*
5132 * Send phy write request.
5133 *
5134 * @param[in] cbarg - callback argument
5135 */
5136static void
5137bfa_phy_write_send(void *cbarg)
5138{
5139        struct bfa_phy_s *phy = cbarg;
5140        struct bfi_phy_write_req_s *msg =
5141                        (struct bfi_phy_write_req_s *) phy->mb.msg;
5142        u32     len;
5143        u16     *buf, *dbuf;
5144        int     i, sz;
5145
5146        msg->instance = phy->instance;
5147        msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5148        len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5149                        phy->residue : BFA_PHY_DMA_BUF_SZ;
5150        msg->length = cpu_to_be32(len);
5151
5152        /* indicate if it's the last msg of the whole write operation */
5153        msg->last = (len == phy->residue) ? 1 : 0;
5154
5155        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5156                bfa_ioc_portid(phy->ioc));
5157        bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5158
5159        buf = (u16 *) (phy->ubuf + phy->offset);
5160        dbuf = (u16 *)phy->dbuf_kva;
5161        sz = len >> 1;
5162        for (i = 0; i < sz; i++)
5163                dbuf[i] = cpu_to_be16(buf[i]);
5164
5165        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5166
5167        phy->residue -= len;
5168        phy->offset += len;
5169}
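
/*
 * Worked example (editor's note): a 20 KB write with the 8 KB DMA
 * buffer takes three sends -- 8 KB (last=0), 8 KB (last=0), then 4 KB
 * with last=1 -- while phy->offset advances 0 -> 8K -> 16K and
 * phy->residue drops 20K -> 12K -> 4K -> 0.
 */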
5170
5171/*
5172 * Send phy read request.
5173 *
5174 * @param[in] cbarg - callback argument
5175 */
5176static void
5177bfa_phy_read_send(void *cbarg)
5178{
5179        struct bfa_phy_s *phy = cbarg;
5180        struct bfi_phy_read_req_s *msg =
5181                        (struct bfi_phy_read_req_s *) phy->mb.msg;
5182        u32     len;
5183
5184        msg->instance = phy->instance;
5185        msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5186        len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5187                        phy->residue : BFA_PHY_DMA_BUF_SZ;
5188        msg->length = cpu_to_be32(len);
5189        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5190                bfa_ioc_portid(phy->ioc));
5191        bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5192        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5193}
5194
5195/*
5196 * Send phy stats request.
5197 *
5198 * @param[in] cbarg - callback argument
5199 */
5200static void
5201bfa_phy_stats_send(void *cbarg)
5202{
5203        struct bfa_phy_s *phy = cbarg;
5204        struct bfi_phy_stats_req_s *msg =
5205                        (struct bfi_phy_stats_req_s *) phy->mb.msg;
5206
5207        msg->instance = phy->instance;
5208        bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5209                bfa_ioc_portid(phy->ioc));
5210        bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5211        bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5212}
5213
5214/*
5215 * Phy memory info API.
5216 *
5217 * @param[in] mincfg - minimal cfg variable
5218 */
5219u32
5220bfa_phy_meminfo(bfa_boolean_t mincfg)
5221{
5222        /* min driver doesn't need phy */
5223        if (mincfg)
5224                return 0;
5225
5226        return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5227}
5228
5229/*
5230 * Phy attach API.
5231 *
5232 * @param[in] phy - phy structure
5233 * @param[in] ioc  - ioc structure
5234 * @param[in] dev  - device structure
5235 * @param[in] trcmod - trace module
5236 * @param[in] mincfg - minimal cfg variable
5237 */
5238void
5239bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5240                struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5241{
5242        phy->ioc = ioc;
5243        phy->trcmod = trcmod;
5244        phy->cbfn = NULL;
5245        phy->cbarg = NULL;
5246        phy->op_busy = 0;
5247
5248        bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5249        bfa_q_qe_init(&phy->ioc_notify);
5250        bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5251        list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5252
5253        /* min driver doesn't need phy */
5254        if (mincfg) {
5255                phy->dbuf_kva = NULL;
5256                phy->dbuf_pa = 0;
5257        }
5258}
5259
5260/*
5261 * Claim memory for phy
5262 *
5263 * @param[in] phy - phy structure
5264 * @param[in] dm_kva - pointer to virtual memory address
5265 * @param[in] dm_pa - physical memory address
5266 * @param[in] mincfg - minimal cfg variable
5267 */
5268void
5269bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5270                bfa_boolean_t mincfg)
5271{
5272        if (mincfg)
5273                return;
5274
5275        phy->dbuf_kva = dm_kva;
5276        phy->dbuf_pa = dm_pa;
5277        memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
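        /*
         * Editor's note: the increments below only advance the local
         * parameter copies; callers stacking several modules advance
         * their own cursors by the same rounded-up amount.
         */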
5278        dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5279        dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5280}
5281
5282bfa_boolean_t
5283bfa_phy_busy(struct bfa_ioc_s *ioc)
5284{
5285        void __iomem    *rb;
5286
5287        rb = bfa_ioc_bar0(ioc);
5288        return readl(rb + BFA_PHY_LOCK_STATUS);
5289}
5290
5291/*
5292 * Get phy attribute.
5293 *
5294 * @param[in] phy - phy structure
 * @param[in] instance - phy image instance
5295 * @param[in] attr - phy attribute structure
5296 * @param[in] cbfn - callback function
5297 * @param[in] cbarg - callback argument
5298 *
5299 * Return status.
5300 */
5301bfa_status_t
5302bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5303                struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5304{
5305        bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5306        bfa_trc(phy, instance);
5307
5308        if (!bfa_phy_present(phy))
5309                return BFA_STATUS_PHY_NOT_PRESENT;
5310
5311        if (!bfa_ioc_is_operational(phy->ioc))
5312                return BFA_STATUS_IOC_NON_OP;
5313
5314        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5315                bfa_trc(phy, phy->op_busy);
5316                return BFA_STATUS_DEVBUSY;
5317        }
5318
5319        phy->op_busy = 1;
5320        phy->cbfn = cbfn;
5321        phy->cbarg = cbarg;
5322        phy->instance = instance;
5323        phy->ubuf = (u8 *) attr;
5324        bfa_phy_query_send(phy);
5325
5326        return BFA_STATUS_OK;
5327}
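
/*
 * Example (editor's sketch, not part of the driver): query phy
 * attributes asynchronously. The callback and the status-capture
 * scheme are hypothetical caller-side code; real callers usually
 * complete a wait object in the callback instead.
 */
#if 0
static void
example_phy_cb(void *cbarg, bfa_status_t status)
{
        *(bfa_status_t *)cbarg = status;        /* record completion */
}

static bfa_status_t
example_phy_query(struct bfa_phy_s *phy, struct bfa_phy_attr_s *attr,
                bfa_status_t *done)
{
        /* attr must stay valid until example_phy_cb() runs */
        return bfa_phy_get_attr(phy, 0, attr, example_phy_cb, done);
}
#endif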
5328
5329/*
5330 * Get phy stats.
5331 *
5332 * @param[in] phy - phy structure
5333 * @param[in] instance - phy image instance
5334 * @param[in] stats - pointer to phy stats
5335 * @param[in] cbfn - callback function
5336 * @param[in] cbarg - callback argument
5337 *
5338 * Return status.
5339 */
5340bfa_status_t
5341bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5342                struct bfa_phy_stats_s *stats,
5343                bfa_cb_phy_t cbfn, void *cbarg)
5344{
5345        bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5346        bfa_trc(phy, instance);
5347
5348        if (!bfa_phy_present(phy))
5349                return BFA_STATUS_PHY_NOT_PRESENT;
5350
5351        if (!bfa_ioc_is_operational(phy->ioc))
5352                return BFA_STATUS_IOC_NON_OP;
5353
5354        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5355                bfa_trc(phy, phy->op_busy);
5356                return BFA_STATUS_DEVBUSY;
5357        }
5358
5359        phy->op_busy = 1;
5360        phy->cbfn = cbfn;
5361        phy->cbarg = cbarg;
5362        phy->instance = instance;
5363        phy->ubuf = (u8 *) stats;
5364        bfa_phy_stats_send(phy);
5365
5366        return BFA_STATUS_OK;
5367}
5368
5369/*
5370 * Update phy image.
5371 *
5372 * @param[in] phy - phy structure
5373 * @param[in] instance - phy image instance
5374 * @param[in] buf - update data buffer
5375 * @param[in] len - data buffer length
5376 * @param[in] offset - offset relative to starting address
5377 * @param[in] cbfn - callback function
5378 * @param[in] cbarg - callback argument
5379 *
5380 * Return status.
5381 */
5382bfa_status_t
5383bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5384                void *buf, u32 len, u32 offset,
5385                bfa_cb_phy_t cbfn, void *cbarg)
5386{
5387        bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5388        bfa_trc(phy, instance);
5389        bfa_trc(phy, len);
5390        bfa_trc(phy, offset);
5391
5392        if (!bfa_phy_present(phy))
5393                return BFA_STATUS_PHY_NOT_PRESENT;
5394
5395        if (!bfa_ioc_is_operational(phy->ioc))
5396                return BFA_STATUS_IOC_NON_OP;
5397
5398        /* 'len' must be a non-zero multiple of 4 (word aligned) */
5399        if (!len || (len & 0x03))
5400                return BFA_STATUS_FAILED;
5401
5402        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5403                bfa_trc(phy, phy->op_busy);
5404                return BFA_STATUS_DEVBUSY;
5405        }
5406
5407        phy->op_busy = 1;
5408        phy->cbfn = cbfn;
5409        phy->cbarg = cbarg;
5410        phy->instance = instance;
5411        phy->residue = len;
5412        phy->offset = 0;
5413        phy->addr_off = offset;
5414        phy->ubuf = buf;
5415
5416        bfa_phy_write_send(phy);
5417        return BFA_STATUS_OK;
5418}
5419
5420/*
5421 * Read phy image.
5422 *
5423 * @param[in] phy - phy structure
5424 * @param[in] instance - phy image instance
5425 * @param[in] buf - read data buffer
5426 * @param[in] len - data buffer length
5427 * @param[in] offset - offset relative to starting address
5428 * @param[in] cbfn - callback function
5429 * @param[in] cbarg - callback argument
5430 *
5431 * Return status.
5432 */
5433bfa_status_t
5434bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5435                void *buf, u32 len, u32 offset,
5436                bfa_cb_phy_t cbfn, void *cbarg)
5437{
5438        bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5439        bfa_trc(phy, instance);
5440        bfa_trc(phy, len);
5441        bfa_trc(phy, offset);
5442
5443        if (!bfa_phy_present(phy))
5444                return BFA_STATUS_PHY_NOT_PRESENT;
5445
5446        if (!bfa_ioc_is_operational(phy->ioc))
5447                return BFA_STATUS_IOC_NON_OP;
5448
5449        /* 'len' must be a non-zero multiple of 4 (word aligned) */
5450        if (!len || (len & 0x03))
5451                return BFA_STATUS_FAILED;
5452
5453        if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5454                bfa_trc(phy, phy->op_busy);
5455                return BFA_STATUS_DEVBUSY;
5456        }
5457
5458        phy->op_busy = 1;
5459        phy->cbfn = cbfn;
5460        phy->cbarg = cbarg;
5461        phy->instance = instance;
5462        phy->residue = len;
5463        phy->offset = 0;
5464        phy->addr_off = offset;
5465        phy->ubuf = buf;
5466        bfa_phy_read_send(phy);
5467
5468        return BFA_STATUS_OK;
5469}
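
/*
 * Example (editor's sketch, not part of the driver): read 256 bytes of
 * the phy image from offset 0. "len" must be a non-zero multiple of 4
 * or bfa_phy_read() fails up front; example_phy_cb() is the
 * hypothetical callback from the sketch above.
 */
#if 0
static bfa_status_t
example_phy_dump(struct bfa_phy_s *phy, void *buf, bfa_status_t *done)
{
        return bfa_phy_read(phy, 0, buf, 256, 0, example_phy_cb, done);
}
#endif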
5470
5471/*
5472 * Process phy response messages upon receiving interrupts.
5473 *
5474 * @param[in] phyarg - phy structure
5475 * @param[in] msg - message structure
5476 */
5477void
5478bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5479{
5480        struct bfa_phy_s *phy = phyarg;
5481        u32     status;
5482
5483        union {
5484                struct bfi_phy_query_rsp_s *query;
5485                struct bfi_phy_stats_rsp_s *stats;
5486                struct bfi_phy_write_rsp_s *write;
5487                struct bfi_phy_read_rsp_s *read;
5488                struct bfi_mbmsg_s   *msg;
5489        } m;
5490
5491        m.msg = msg;
5492        bfa_trc(phy, msg->mh.msg_id);
5493
5494        if (!phy->op_busy) {
5495                /* receiving response after ioc failure */
5496                bfa_trc(phy, 0x9999);
5497                return;
5498        }
5499
5500        switch (msg->mh.msg_id) {
5501        case BFI_PHY_I2H_QUERY_RSP:
5502                status = be32_to_cpu(m.query->status);
5503                bfa_trc(phy, status);
5504
5505                if (status == BFA_STATUS_OK) {
5506                        struct bfa_phy_attr_s *attr =
5507                                (struct bfa_phy_attr_s *) phy->ubuf;
5508                        bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5509                                        sizeof(struct bfa_phy_attr_s));
5510                        bfa_trc(phy, attr->status);
5511                        bfa_trc(phy, attr->length);
5512                }
5513
5514                phy->status = status;
5515                phy->op_busy = 0;
5516                if (phy->cbfn)
5517                        phy->cbfn(phy->cbarg, phy->status);
5518                break;
5519        case BFI_PHY_I2H_STATS_RSP:
5520                status = be32_to_cpu(m.stats->status);
5521                bfa_trc(phy, status);
5522
5523                if (status == BFA_STATUS_OK) {
5524                        struct bfa_phy_stats_s *stats =
5525                                (struct bfa_phy_stats_s *) phy->ubuf;
5526                        bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5527                                sizeof(struct bfa_phy_stats_s));
5528                        bfa_trc(phy, stats->status);
5529                }
5530
5531                phy->status = status;
5532                phy->op_busy = 0;
5533                if (phy->cbfn)
5534                        phy->cbfn(phy->cbarg, phy->status);
5535                break;
5536        case BFI_PHY_I2H_WRITE_RSP:
5537                status = be32_to_cpu(m.write->status);
5538                bfa_trc(phy, status);
5539
5540                if (status != BFA_STATUS_OK || phy->residue == 0) {
5541                        phy->status = status;
5542                        phy->op_busy = 0;
5543                        if (phy->cbfn)
5544                                phy->cbfn(phy->cbarg, phy->status);
5545                } else {
5546                        bfa_trc(phy, phy->offset);
5547                        bfa_phy_write_send(phy);
5548                }
5549                break;
5550        case BFI_PHY_I2H_READ_RSP:
5551                status = be32_to_cpu(m.read->status);
5552                bfa_trc(phy, status);
5553
5554                if (status != BFA_STATUS_OK) {
5555                        phy->status = status;
5556                        phy->op_busy = 0;
5557                        if (phy->cbfn)
5558                                phy->cbfn(phy->cbarg, phy->status);
5559                } else {
5560                        u32 len = be32_to_cpu(m.read->length);
5561                        u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5562                        u16 *dbuf = (u16 *)phy->dbuf_kva;
5563                        int i, sz = len >> 1;
5564
5565                        bfa_trc(phy, phy->offset);
5566                        bfa_trc(phy, len);
5567
5568                        for (i = 0; i < sz; i++)
5569                                buf[i] = be16_to_cpu(dbuf[i]);
5570
5571                        phy->residue -= len;
5572                        phy->offset += len;
5573
5574                        if (phy->residue == 0) {
5575                                phy->status = status;
5576                                phy->op_busy = 0;
5577                                if (phy->cbfn)
5578                                        phy->cbfn(phy->cbarg, phy->status);
5579                        } else
5580                                bfa_phy_read_send(phy);
5581                }
5582                break;
5583        default:
5584                WARN_ON(1);
5585        }
5586}
5587
5588/*
5589 *      DCONF module specific
5590 */
5591
5592BFA_MODULE(dconf);
5593
5594/*
5595 * DCONF state machine events
5596 */
5597enum bfa_dconf_event {
5598        BFA_DCONF_SM_INIT               = 1,    /* dconf init */
5599        BFA_DCONF_SM_FLASH_COMP         = 2,    /* flash read/write done */
5600        BFA_DCONF_SM_WR                 = 3,    /* dconf entry write request */
5601        BFA_DCONF_SM_TIMEOUT            = 4,    /* flash sync timer expiry */
5602        BFA_DCONF_SM_EXIT               = 5,    /* exit dconf module */
5603        BFA_DCONF_SM_IOCDISABLE         = 6,    /* IOC disable event */
5604};
5605
5606/* forward declaration of DCONF state machine */
5607static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5608                                enum bfa_dconf_event event);
5609static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5610                                enum bfa_dconf_event event);
5611static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5612                                enum bfa_dconf_event event);
5613static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5614                                enum bfa_dconf_event event);
5615static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5616                                enum bfa_dconf_event event);
5617static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5618                                enum bfa_dconf_event event);
5619static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5620                                enum bfa_dconf_event event);
5621
5622static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5623static void bfa_dconf_timer(void *cbarg);
5624static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5625static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5626
5627/*
5628 * Beginning state of dconf module. Waiting for an event to start.
5629 */
5630static void
5631bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5632{
5633        bfa_status_t bfa_status;
5634        bfa_trc(dconf->bfa, event);
5635
5636        switch (event) {
5637        case BFA_DCONF_SM_INIT:
5638                if (dconf->min_cfg) {
5639                        bfa_trc(dconf->bfa, dconf->min_cfg);
5640                        bfa_fsm_send_event(&dconf->bfa->iocfc,
5641                                        IOCFC_E_DCONF_DONE);
5642                        return;
5643                }
5644                bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5645                bfa_timer_start(dconf->bfa, &dconf->timer,
5646                        bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5647                bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5648                                        BFA_FLASH_PART_DRV, dconf->instance,
5649                                        dconf->dconf,
5650                                        sizeof(struct bfa_dconf_s), 0,
5651                                        bfa_dconf_init_cb, dconf->bfa);
5652                if (bfa_status != BFA_STATUS_OK) {
5653                        bfa_timer_stop(&dconf->timer);
5654                        bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5655                        bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5656                        return;
5657                }
5658                break;
5659        case BFA_DCONF_SM_EXIT:
5660                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
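                /* fall through */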
5661        case BFA_DCONF_SM_IOCDISABLE:
5662        case BFA_DCONF_SM_WR:
5663        case BFA_DCONF_SM_FLASH_COMP:
5664                break;
5665        default:
5666                bfa_sm_fault(dconf->bfa, event);
5667        }
5668}
5669
5670/*
5671 * Read dconf entries from flash and call back to the driver once done.
5672 */
5673static void
5674bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5675                        enum bfa_dconf_event event)
5676{
5677        bfa_trc(dconf->bfa, event);
5678
5679        switch (event) {
5680        case BFA_DCONF_SM_FLASH_COMP:
5681                bfa_timer_stop(&dconf->timer);
5682                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5683                break;
5684        case BFA_DCONF_SM_TIMEOUT:
5685                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5686                bfa_ioc_suspend(&dconf->bfa->ioc);
5687                break;
5688        case BFA_DCONF_SM_EXIT:
5689                bfa_timer_stop(&dconf->timer);
5690                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5691                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5692                break;
5693        case BFA_DCONF_SM_IOCDISABLE:
5694                bfa_timer_stop(&dconf->timer);
5695                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5696                break;
5697        default:
5698                bfa_sm_fault(dconf->bfa, event);
5699        }
5700}
5701
5702/*
5703 * DCONF module is in ready state; initialization is complete.
5704 */
5705static void
5706bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5707{
5708        bfa_trc(dconf->bfa, event);
5709
5710        switch (event) {
5711        case BFA_DCONF_SM_WR:
5712                bfa_timer_start(dconf->bfa, &dconf->timer,
5713                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5714                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5715                break;
5716        case BFA_DCONF_SM_EXIT:
5717                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5718                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5719                break;
5720        case BFA_DCONF_SM_INIT:
5721        case BFA_DCONF_SM_IOCDISABLE:
5722                break;
5723        default:
5724                bfa_sm_fault(dconf->bfa, event);
5725        }
5726}
5727
5728/*
5729 * Entries are dirty; write them back to flash.
5730 */
5731
5732static void
5733bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5734{
5735        bfa_trc(dconf->bfa, event);
5736
5737        switch (event) {
5738        case BFA_DCONF_SM_TIMEOUT:
5739                bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5740                bfa_dconf_flash_write(dconf);
5741                break;
5742        case BFA_DCONF_SM_WR:
5743                bfa_timer_stop(&dconf->timer);
5744                bfa_timer_start(dconf->bfa, &dconf->timer,
5745                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5746                break;
5747        case BFA_DCONF_SM_EXIT:
5748                bfa_timer_stop(&dconf->timer);
5749                bfa_timer_start(dconf->bfa, &dconf->timer,
5750                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5751                bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5752                bfa_dconf_flash_write(dconf);
5753                break;
5754        case BFA_DCONF_SM_FLASH_COMP:
5755                break;
5756        case BFA_DCONF_SM_IOCDISABLE:
5757                bfa_timer_stop(&dconf->timer);
5758                bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5759                break;
5760        default:
5761                bfa_sm_fault(dconf->bfa, event);
5762        }
5763}
5764
5765/*
5766 * Sync the dconf entries to the flash.
5767 */
5768static void
5769bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5770                        enum bfa_dconf_event event)
5771{
5772        bfa_trc(dconf->bfa, event);
5773
5774        switch (event) {
5775        case BFA_DCONF_SM_IOCDISABLE:
5776        case BFA_DCONF_SM_FLASH_COMP:
5777                bfa_timer_stop(&dconf->timer);
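                /* fall through */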
5778        case BFA_DCONF_SM_TIMEOUT:
5779                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5780                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5781                break;
5782        default:
5783                bfa_sm_fault(dconf->bfa, event);
5784        }
5785}
5786
5787static void
5788bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5789{
5790        bfa_trc(dconf->bfa, event);
5791
5792        switch (event) {
5793        case BFA_DCONF_SM_FLASH_COMP:
5794                bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5795                break;
5796        case BFA_DCONF_SM_WR:
5797                bfa_timer_start(dconf->bfa, &dconf->timer,
5798                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5799                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5800                break;
5801        case BFA_DCONF_SM_EXIT:
5802                bfa_timer_start(dconf->bfa, &dconf->timer,
5803                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5804                bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5805                break;
5806        case BFA_DCONF_SM_IOCDISABLE:
5807                bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5808                break;
5809        default:
5810                bfa_sm_fault(dconf->bfa, event);
5811        }
5812}
5813
5814static void
5815bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5816                        enum bfa_dconf_event event)
5817{
5818        bfa_trc(dconf->bfa, event);
5819
5820        switch (event) {
5821        case BFA_DCONF_SM_INIT:
5822                bfa_timer_start(dconf->bfa, &dconf->timer,
5823                        bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5824                bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5825                break;
5826        case BFA_DCONF_SM_EXIT:
5827                bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5828                bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5829                break;
5830        case BFA_DCONF_SM_IOCDISABLE:
5831                break;
5832        default:
5833                bfa_sm_fault(dconf->bfa, event);
5834        }
5835}
5836
5837/*
5838 * Compute the KVA memory needed by the DCONF module.
5839 */
5840static void
5841bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5842                  struct bfa_s *bfa)
5843{
5844        struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5845
5846        if (cfg->drvcfg.min_cfg)
5847                bfa_mem_kva_setup(meminfo, dconf_kva,
5848                                sizeof(struct bfa_dconf_hdr_s));
5849        else
5850                bfa_mem_kva_setup(meminfo, dconf_kva,
5851                                sizeof(struct bfa_dconf_s));
5852}
5853
5854static void
5855bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5856                struct bfa_pcidev_s *pcidev)
5857{
5858        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5859
5860        dconf->bfad = bfad;
5861        dconf->bfa = bfa;
5862        dconf->instance = bfa->ioc.port_id;
5863        bfa_trc(bfa, dconf->instance);
5864
5865        dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5866        if (cfg->drvcfg.min_cfg) {
5867                bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5868                dconf->min_cfg = BFA_TRUE;
5869        } else {
5870                dconf->min_cfg = BFA_FALSE;
5871                bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5872        }
5873
5874        bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5875        bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5876}
5877
5878static void
5879bfa_dconf_init_cb(void *arg, bfa_status_t status)
5880{
5881        struct bfa_s *bfa = arg;
5882        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5883
5884        if (status == BFA_STATUS_OK) {
5885                bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5886                if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5887                        dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5888                if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5889                        dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5890        }
5891        bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5892        bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
5893}
5894
5895void
5896bfa_dconf_modinit(struct bfa_s *bfa)
5897{
5898        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5899        bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5900}

5901static void
5902bfa_dconf_start(struct bfa_s *bfa)
5903{
5904}
5905
5906static void
5907bfa_dconf_stop(struct bfa_s *bfa)
5908{
5909}
5910
5911static void
bfa_dconf_timer(void *cbarg)
5912{
5913        struct bfa_dconf_mod_s *dconf = cbarg;
5914        bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5915}

5916static void
5917bfa_dconf_iocdisable(struct bfa_s *bfa)
5918{
5919        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5920        bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5921}
5922
5923static void
5924bfa_dconf_detach(struct bfa_s *bfa)
5925{
5926}
5927
5928static bfa_status_t
5929bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5930{
5931        bfa_status_t bfa_status;
5932        bfa_trc(dconf->bfa, 0);
5933
5934        bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5935                                BFA_FLASH_PART_DRV, dconf->instance,
5936                                dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
5937                                bfa_dconf_cbfn, dconf);
5938        WARN_ON(bfa_status != BFA_STATUS_OK);
5940        bfa_trc(dconf->bfa, bfa_status);
5941
5942        return bfa_status;
5943}
5944
5945bfa_status_t
5946bfa_dconf_update(struct bfa_s *bfa)
5947{
5948        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5949        bfa_trc(dconf->bfa, 0);
5950        if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5951                return BFA_STATUS_FAILED;
5952
5953        if (dconf->min_cfg) {
5954                bfa_trc(dconf->bfa, dconf->min_cfg);
5955                return BFA_STATUS_FAILED;
5956        }
5957
5958        bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5959        return BFA_STATUS_OK;
5960}
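
/*
 * Example (editor's sketch, not part of the driver): after changing
 * data inside dconf->dconf, mark it dirty so the state machine
 * schedules a delayed flash write. The update is rejected in min-cfg
 * mode or while the IOC is down with dirty entries.
 */
#if 0
static void
example_dconf_touch(struct bfa_s *bfa)
{
        if (bfa_dconf_update(bfa) != BFA_STATUS_OK)
                bfa_trc(bfa, 0);        /* update rejected */
}
#endif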
5961
5962static void
5963bfa_dconf_cbfn(void *arg, bfa_status_t status)
5964{
5965        struct bfa_dconf_mod_s *dconf = arg;
5966        WARN_ON(status);
5967        bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5968}
5969
5970void
5971bfa_dconf_modexit(struct bfa_s *bfa)
5972{
5973        struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5974        bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5975}
5976
5977/*
5978 * FRU specific functions
5979 */
5980
5981#define BFA_FRU_DMA_BUF_SZ      0x02000         /* 8k dma buffer */
5982#define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
5983#define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
5984
5985static void
5986bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
5987{
5988        struct bfa_fru_s *fru = cbarg;
5989
5990        bfa_trc(fru, event);
5991
5992        switch (event) {
5993        case BFA_IOC_E_DISABLED:
5994        case BFA_IOC_E_FAILED:
5995                if (fru->op_busy) {
5996                        fru->status = BFA_STATUS_IOC_FAILURE;
5997                        fru->cbfn(fru->cbarg, fru->status);
5998                        fru->op_busy = 0;
5999                }
6000                break;
6001
6002        default:
6003                break;
6004        }
6005}
6006
6007/*
6008 * Send fru write request.
6009 *
6010 * @param[in] cbarg - callback argument
6011 */
6012static void
6013bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6014{
6015        struct bfa_fru_s *fru = cbarg;
6016        struct bfi_fru_write_req_s *msg =
6017                        (struct bfi_fru_write_req_s *) fru->mb.msg;
6018        u32 len;
6019
6020        msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6021        len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6022                                fru->residue : BFA_FRU_DMA_BUF_SZ;
6023        msg->length = cpu_to_be32(len);
6024
6025        /*
6026         * indicate if it's the last msg of the whole write operation
6027         */
6028        msg->last = (len == fru->residue) ? 1 : 0;
6029
6030        msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
6031        bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6032        bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6033
6034        memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6035        bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6036
6037        fru->residue -= len;
6038        fru->offset += len;
6039}
6040
6041/*
6042 * Send fru read request.
6043 *
6044 * @param[in] cbarg - callback argument
6045 */
6046static void
6047bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6048{
6049        struct bfa_fru_s *fru = cbarg;
6050        struct bfi_fru_read_req_s *msg =
6051                        (struct bfi_fru_read_req_s *) fru->mb.msg;
6052        u32 len;
6053
6054        msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6055        len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6056                                fru->residue : BFA_FRU_DMA_BUF_SZ;
6057        msg->length = cpu_to_be32(len);
6058        bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6059        bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6060        bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6061}
6062
6063/*
6064 * FRU memory info API.
6065 *
6066 * @param[in] mincfg - minimal cfg variable
6067 */
6068u32
6069bfa_fru_meminfo(bfa_boolean_t mincfg)
6070{
6071        /* min driver doesn't need fru */
6072        if (mincfg)
6073                return 0;
6074
6075        return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6076}
6077
6078/*
6079 * FRU attach API.
6080 *
6081 * @param[in] fru - fru structure
6082 * @param[in] ioc  - ioc structure
6083 * @param[in] dev  - device structure
6084 * @param[in] trcmod - trace module
6085 * @param[in] mincfg - minimal cfg variable
6086 */
6087void
6088bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6089        struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6090{
6091        fru->ioc = ioc;
6092        fru->trcmod = trcmod;
6093        fru->cbfn = NULL;
6094        fru->cbarg = NULL;
6095        fru->op_busy = 0;
6096
6097        bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6098        bfa_q_qe_init(&fru->ioc_notify);
6099        bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6100        list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6101
6102        /* min driver doesn't need fru */
6103        if (mincfg) {
6104                fru->dbuf_kva = NULL;
6105                fru->dbuf_pa = 0;
6106        }
6107}
6108
6109/*
6110 * Claim memory for fru
6111 *
6112 * @param[in] fru - fru structure
6113 * @param[in] dm_kva - pointer to virtual memory address
6114 * @param[in] dm_pa - physical memory address
6115 * @param[in] mincfg - minimal cfg variable
6116 */
6117void
6118bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6119        bfa_boolean_t mincfg)
6120{
6121        if (mincfg)
6122                return;
6123
6124        fru->dbuf_kva = dm_kva;
6125        fru->dbuf_pa = dm_pa;
6126        memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6127        dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6128        dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6129}
6130
6131/*
6132 * Update fru vpd image.
6133 *
6134 * @param[in] fru - fru structure
6135 * @param[in] buf - update data buffer
6136 * @param[in] len - data buffer length
6137 * @param[in] offset - offset relative to starting address
6138 * @param[in] cbfn - callback function
6139 * @param[in] cbarg - callback argument
6140 *
6141 * Return status.
6142 */
6143bfa_status_t
6144bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6145                  bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
6146{
6147        bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6148        bfa_trc(fru, len);
6149        bfa_trc(fru, offset);
6150
6151        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
6152                fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6153                return BFA_STATUS_FRU_NOT_PRESENT;
6154
6155        if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
                fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6156                return BFA_STATUS_CMD_NOTSUPP;
6157
6158        if (!bfa_ioc_is_operational(fru->ioc))
6159                return BFA_STATUS_IOC_NON_OP;
6160
6161        if (fru->op_busy) {
6162                bfa_trc(fru, fru->op_busy);
6163                return BFA_STATUS_DEVBUSY;
6164        }
6165
6166        fru->op_busy = 1;
6167
6168        fru->cbfn = cbfn;
6169        fru->cbarg = cbarg;
6170        fru->residue = len;
6171        fru->offset = 0;
6172        fru->addr_off = offset;
6173        fru->ubuf = buf;
6174        fru->trfr_cmpl = trfr_cmpl;
6175
6176        bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6177
6178        return BFA_STATUS_OK;
6179}
6180
6181/*
6182 * Read fru vpd image.
6183 *
6184 * @param[in] fru - fru structure
6185 * @param[in] buf - read data buffer
6186 * @param[in] len - data buffer length
6187 * @param[in] offset - offset relative to starting address
6188 * @param[in] cbfn - callback function
6189 * @param[in] cbarg - callback argument
6190 *
6191 * Return status.
6192 */
6193bfa_status_t
6194bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6195                bfa_cb_fru_t cbfn, void *cbarg)
6196{
6197        bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6198        bfa_trc(fru, len);
6199        bfa_trc(fru, offset);
6200
6201        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6202                return BFA_STATUS_FRU_NOT_PRESENT;
6203
6204        if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
6205                fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6206                return BFA_STATUS_CMD_NOTSUPP;
6207
6208        if (!bfa_ioc_is_operational(fru->ioc))
6209                return BFA_STATUS_IOC_NON_OP;
6210
6211        if (fru->op_busy) {
6212                bfa_trc(fru, fru->op_busy);
6213                return BFA_STATUS_DEVBUSY;
6214        }
6215
6216        fru->op_busy = 1;
6217
6218        fru->cbfn = cbfn;
6219        fru->cbarg = cbarg;
6220        fru->residue = len;
6221        fru->offset = 0;
6222        fru->addr_off = offset;
6223        fru->ubuf = buf;
6224        bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6225
6226        return BFA_STATUS_OK;
6227}
6228
6229/*
6230 * Get the maximum size of the fru vpd image.
6231 *
6232 * @param[in] fru - fru structure
6233 * @param[out] size - maximum size of fru vpd data
6234 *
6235 * Return status.
6236 */
6237bfa_status_t
6238bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6239{
6240        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6241                return BFA_STATUS_FRU_NOT_PRESENT;
6242
6243        if (!bfa_ioc_is_operational(fru->ioc))
6244                return BFA_STATUS_IOC_NON_OP;
6245
6246        if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
6247                fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
6248                *max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6249        else
6250                return BFA_STATUS_CMD_NOTSUPP;
6251        return BFA_STATUS_OK;
6252}
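
/*
 * Example (editor's sketch, not part of the driver): size the FRU VPD
 * area before reading it. The callback/flag pattern mirrors the phy
 * sketches above; the buffer must stay valid until the callback fires.
 */
#if 0
static bfa_status_t
example_fruvpd_dump(struct bfa_fru_s *fru, void *buf, u32 buf_len,
                bfa_cb_fru_t cbfn, void *cbarg)
{
        u32 max_size;
        bfa_status_t status;

        status = bfa_fruvpd_get_max_size(fru, &max_size);
        if (status != BFA_STATUS_OK)
                return status;

        if (buf_len < max_size)
                return BFA_STATUS_FAILED;

        return bfa_fruvpd_read(fru, buf, max_size, 0, cbfn, cbarg);
}
#endif
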
6253/*
6254 * tfru write.
6255 *
6256 * @param[in] fru - fru structure
6257 * @param[in] buf - update data buffer
6258 * @param[in] len - data buffer length
6259 * @param[in] offset - offset relative to starting address
6260 * @param[in] cbfn - callback function
6261 * @param[in] cbarg - callback argument
6262 *
6263 * Return status.
6264 */
6265bfa_status_t
6266bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6267               bfa_cb_fru_t cbfn, void *cbarg)
6268{
6269        bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6270        bfa_trc(fru, len);
6271        bfa_trc(fru, offset);
6272        bfa_trc(fru, *((u8 *) buf));
6273
6274        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6275                return BFA_STATUS_FRU_NOT_PRESENT;
6276
6277        if (!bfa_ioc_is_operational(fru->ioc))
6278                return BFA_STATUS_IOC_NON_OP;
6279
6280        if (fru->op_busy) {
6281                bfa_trc(fru, fru->op_busy);
6282                return BFA_STATUS_DEVBUSY;
6283        }
6284
6285        fru->op_busy = 1;
6286
6287        fru->cbfn = cbfn;
6288        fru->cbarg = cbarg;
6289        fru->residue = len;
6290        fru->offset = 0;
6291        fru->addr_off = offset;
6292        fru->ubuf = buf;
6293
6294        bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6295
6296        return BFA_STATUS_OK;
6297}
6298
6299/*
6300 * tfru read.
6301 *
6302 * @param[in] fru - fru structure
6303 * @param[in] buf - read data buffer
6304 * @param[in] len - data buffer length
6305 * @param[in] offset - offset relative to starting address
6306 * @param[in] cbfn - callback function
6307 * @param[in] cbarg - callback argument
6308 *
6309 * Return status.
6310 */
6311bfa_status_t
6312bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6313              bfa_cb_fru_t cbfn, void *cbarg)
6314{
6315        bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6316        bfa_trc(fru, len);
6317        bfa_trc(fru, offset);
6318
6319        if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6320                return BFA_STATUS_FRU_NOT_PRESENT;
6321
6322        if (!bfa_ioc_is_operational(fru->ioc))
6323                return BFA_STATUS_IOC_NON_OP;
6324
6325        if (fru->op_busy) {
6326                bfa_trc(fru, fru->op_busy);
6327                return BFA_STATUS_DEVBUSY;
6328        }
6329
6330        fru->op_busy = 1;
6331
6332        fru->cbfn = cbfn;
6333        fru->cbarg = cbarg;
6334        fru->residue = len;
6335        fru->offset = 0;
6336        fru->addr_off = offset;
6337        fru->ubuf = buf;
6338        bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6339
6340        return BFA_STATUS_OK;
6341}
6342
6343/*
6344 * Process fru response messages upon receiving interrupts.
6345 *
6346 * @param[in] fruarg - fru structure
6347 * @param[in] msg - message structure
6348 */
6349void
6350bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6351{
6352        struct bfa_fru_s *fru = fruarg;
6353        struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6354        u32 status;
6355
6356        bfa_trc(fru, msg->mh.msg_id);
6357
6358        if (!fru->op_busy) {
6359                /*
6360                 * receiving response after ioc failure
6361                 */
6362                bfa_trc(fru, 0x9999);
6363                return;
6364        }
6365
6366        switch (msg->mh.msg_id) {
6367        case BFI_FRUVPD_I2H_WRITE_RSP:
6368        case BFI_TFRU_I2H_WRITE_RSP:
6369                status = be32_to_cpu(rsp->status);
6370                bfa_trc(fru, status);
6371
6372                if (status != BFA_STATUS_OK || fru->residue == 0) {
6373                        fru->status = status;
6374                        fru->op_busy = 0;
6375                        if (fru->cbfn)
6376                                fru->cbfn(fru->cbarg, fru->status);
6377                } else {
6378                        bfa_trc(fru, fru->offset);
6379                        if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6380                                bfa_fru_write_send(fru,
6381                                        BFI_FRUVPD_H2I_WRITE_REQ);
6382                        else
6383                                bfa_fru_write_send(fru,
6384                                        BFI_TFRU_H2I_WRITE_REQ);
6385                }
6386                break;
6387        case BFI_FRUVPD_I2H_READ_RSP:
6388        case BFI_TFRU_I2H_READ_RSP:
6389                status = be32_to_cpu(rsp->status);
6390                bfa_trc(fru, status);
6391
6392                if (status != BFA_STATUS_OK) {
6393                        fru->status = status;
6394                        fru->op_busy = 0;
6395                        if (fru->cbfn)
6396                                fru->cbfn(fru->cbarg, fru->status);
6397                } else {
6398                        u32 len = be32_to_cpu(rsp->length);
6399
6400                        bfa_trc(fru, fru->offset);
6401                        bfa_trc(fru, len);
6402
6403                        memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6404                        fru->residue -= len;
6405                        fru->offset += len;
6406
6407                        if (fru->residue == 0) {
6408                                fru->status = status;
6409                                fru->op_busy = 0;
6410                                if (fru->cbfn)
6411                                        fru->cbfn(fru->cbarg, fru->status);
6412                        } else {
6413                                if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6414                                        bfa_fru_read_send(fru,
6415                                                BFI_FRUVPD_H2I_READ_REQ);
6416                                else
6417                                        bfa_fru_read_send(fru,
6418                                                BFI_TFRU_H2I_READ_REQ);
6419                        }
6420                }
6421                break;
6422        default:
6423                WARN_ON(1);
6424        }
6425}
6426