linux/drivers/net/ethernet/brocade/bna/bfa_ioc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

/* IOC local definitions */

/* ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details. */

#define bfa_ioc_firmware_lock(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
                        ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)                      \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                        \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)                       \
                        ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                         \
                        ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)           \
                        ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)           \
                ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
                        enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
                                                char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
                                                char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
                                                char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
                                                char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
                                                char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/* IOC state machine definitions/declarations */
enum ioc_event {
        IOC_E_RESET             = 1,    /*!< IOC reset request          */
        IOC_E_ENABLE            = 2,    /*!< IOC enable request         */
        IOC_E_DISABLE           = 3,    /*!< IOC disable request        */
        IOC_E_DETACH            = 4,    /*!< driver detach cleanup      */
        IOC_E_ENABLED           = 5,    /*!< f/w enabled                */
        IOC_E_FWRSP_GETATTR     = 6,    /*!< IOC get attribute response */
        IOC_E_DISABLED          = 7,    /*!< f/w disabled               */
        IOC_E_PFFAILED          = 8,    /*!< failure notice by iocpf sm */
        IOC_E_HBFAIL            = 9,    /*!< heartbeat failure          */
        IOC_E_HWERROR           = 10,   /*!< hardware error interrupt   */
        IOC_E_TIMEOUT           = 11,   /*!< timeout                    */
        IOC_E_HWFAILED          = 12,   /*!< PCI mapping failure notice */
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
        {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
        {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
        {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
        {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
        {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
        {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};
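
/*
 * Illustrative sketch, not part of the upstream driver: ioc_sm_table maps a
 * state-handler function back to its BFA_IOC_* enum so the current FSM state
 * can be reported to management code. Assuming the bfa_sm_table layout from
 * bfa_cs.h (an .sm function pointer plus a .state value), a table lookup
 * would look like this (ioc_sm_to_state is a hypothetical helper name):
 *
 *      static enum bfa_ioc_state
 *      ioc_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
 *      {
 *              int i = 0;
 *
 *              while (smt[i].sm && smt[i].sm != sm)
 *                      i++;
 *              return smt[i].state;
 *      }
 *
 * called as ioc_sm_to_state(ioc_sm_table, (bfa_sm_t)ioc->fsm).
 */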

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/* IOCPF state machine events */
enum iocpf_event {
        IOCPF_E_ENABLE          = 1,    /*!< IOCPF enable request       */
        IOCPF_E_DISABLE         = 2,    /*!< IOCPF disable request      */
        IOCPF_E_STOP            = 3,    /*!< stop on driver detach      */
        IOCPF_E_FWREADY         = 4,    /*!< f/w initialization done    */
        IOCPF_E_FWRSP_ENABLE    = 5,    /*!< enable f/w response        */
        IOCPF_E_FWRSP_DISABLE   = 6,    /*!< disable f/w response       */
        IOCPF_E_FAIL            = 7,    /*!< failure notice by ioc sm   */
        IOCPF_E_INITFAIL        = 8,    /*!< init fail notice by ioc sm */
        IOCPF_E_GETATTRFAIL     = 9,    /*!< init fail notice by ioc sm */
        IOCPF_E_SEMLOCKED       = 10,   /*!< h/w semaphore is locked    */
        IOCPF_E_TIMEOUT         = 11,   /*!< f/w response timeout       */
        IOCPF_E_SEM_ERROR       = 12,   /*!< h/w sem mapping error      */
};

/* IOCPF states */
enum bfa_iocpf_state {
        BFA_IOCPF_RESET         = 1,    /*!< IOC is in reset state */
        BFA_IOCPF_SEMWAIT       = 2,    /*!< Waiting for IOC h/w semaphore */
        BFA_IOCPF_HWINIT        = 3,    /*!< IOC h/w is being initialized */
        BFA_IOCPF_READY         = 4,    /*!< IOCPF is initialized */
        BFA_IOCPF_INITFAIL      = 5,    /*!< IOCPF failed */
        BFA_IOCPF_FAIL          = 6,    /*!< IOCPF failed */
        BFA_IOCPF_DISABLING     = 7,    /*!< IOCPF is being disabled */
        BFA_IOCPF_DISABLED      = 8,    /*!< IOCPF is disabled */
        BFA_IOCPF_FWMISMATCH    = 9,    /*!< IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
        {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
        {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
        {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
        {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/* IOC State Machine */

/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_RESET:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        default:
                bfa_sm_fault(event);
        }
}

/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
        bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_disable_comp(ioc);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
        bfa_iocpf_enable(ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
                fallthrough;
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_initfail(ioc);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(event);
        }
}

/* Send getattr request to firmware and start the IOC timer. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
        mod_timer(&ioc->ioc_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_getattr(ioc);
}

/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                del_timer(&ioc->ioc_timer);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                del_timer(&ioc->ioc_timer);
                fallthrough;
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_getattrfail(ioc);
                break;

        case IOC_E_DISABLE:
                del_timer(&ioc->ioc_timer);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
        bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_ioc_hb_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_hb_stop(ioc);
                fallthrough;

        case IOC_E_HBFAIL:
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

                bfa_ioc_fail_notify(ioc);

                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_fail(ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
        bfa_iocpf_disable(ioc);
}

/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_DISABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_HWERROR:
                /*
                 * No state change. Will move to disabled state
                 * after iocpf sm completes failure processing and
                 * moves to disabled state.
                 */
                bfa_iocpf_fail(ioc);
                break;

        case IOC_E_HWFAILED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                bfa_ioc_disable_comp(ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}

/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
        bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                /**
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_iocpf_initfail(ioc);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/* IOC failure. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_iocpf_stop(ioc);
                break;

        case IOC_E_HWERROR:
                /* HB failure notification, ignore. */
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}

/* IOC hardware failure. */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
        switch (event) {
        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        default:
                bfa_sm_fault(event);
        }
}
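
/*
 * Usage sketch, illustrative only: callers never invoke the bfa_ioc_sm_*
 * handlers directly. They post an ioc_event and let the FSM dispatch it to
 * whichever state function is current, e.g. to start enable processing from
 * the reset state:
 *
 *      bfa_fsm_send_event(ioc, IOC_E_ENABLE);
 *
 * bfa_fsm_set_state() inside the handler then runs the entry action
 * (the matching bfa_ioc_sm_*_entry function above) of the next state.
 */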

/* IOCPF State Machine */

/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
        iocpf->fw_mismatch_notified = false;
        iocpf->auto_recover = bfa_nw_auto_recover;
}

/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_STOP:
                break;

        default:
                bfa_sm_fault(event);
        }
}

/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_init(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
                                bfa_ioc_firmware_unlock(ioc);
                                bfa_nw_ioc_hw_sem_release(ioc);
                                mod_timer(&ioc->sem_timer, jiffies +
                                        msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
                        }
                } else {
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_ioc_pf_hwfailed(ioc);
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_ioc_pf_disabled(ioc);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(event);
        }
}

/* Notify the driver of the firmware version mismatch. */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
        /* Call only the first time sm enters fwmismatch state. */
        if (!iocpf->fw_mismatch_notified)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);

        iocpf->fw_mismatch_notified = true;
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
}

/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_TIMEOUT:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_ioc_pf_disabled(ioc);
                break;

        case IOCPF_E_STOP:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(event);
        }
}

/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting semaphore for h/w initialization. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_sync_complete(ioc)) {
                        bfa_ioc_sync_join(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                } else {
                        bfa_nw_ioc_hw_sem_release(ioc);
                        mod_timer(&ioc->sem_timer, jiffies +
                                msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_ioc_pf_hwfailed(ioc);
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
        iocpf->poll_time = 0;
        bfa_ioc_reset(iocpf->ioc, false);
}

/* Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWREADY:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
                break;

        case IOCPF_E_TIMEOUT:
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_ioc_pf_failed(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_ioc_sync_leave(ioc);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        /**
         * Enable Interrupts before sending fw IOC ENABLE cmd.
         */
        iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWRSP_ENABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
                break;

        case IOCPF_E_INITFAIL:
                del_timer(&ioc->iocpf_timer);
                fallthrough;

        case IOCPF_E_TIMEOUT:
                bfa_nw_ioc_hw_sem_release(ioc);
                if (event == IOCPF_E_TIMEOUT)
                        bfa_ioc_pf_failed(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        case IOCPF_E_GETATTRFAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
        mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_TOV));
        bfa_ioc_send_disable(iocpf->ioc);
}

/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
                del_timer(&ioc->iocpf_timer);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                del_timer(&ioc->iocpf_timer);
                fallthrough;

        case IOCPF_E_TIMEOUT:
                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FWRSP_ENABLE:
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_leave(ioc);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_ioc_pf_hwfailed(ioc);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(event);
        }
}

/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
        bfa_ioc_mbox_flush(iocpf->ioc);
        bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
        bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_notify_fail(ioc);
                bfa_ioc_sync_leave(ioc);
                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                bfa_nw_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_ioc_pf_hwfailed(ioc);
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
        /**
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(iocpf->ioc);

        /**
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_flush(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        struct bfa_ioc *ioc = iocpf->ioc;

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        bfa_ioc_sync_leave(ioc);
                        bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                        bfa_nw_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
                        if (bfa_ioc_sync_complete(ioc))
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        else {
                                bfa_nw_ioc_hw_sem_release(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                        }
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_ioc_pf_hwfailed(ioc);
                break;

        case IOCPF_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(event);
        }
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(event);
        }
}

/* BFA IOC private functions */

/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
        struct bfa_ioc_notify *notify;

        list_for_each_entry(notify, &ioc->notify_q, qe)
                notify->cbfn(notify->cbarg, event);
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
        ioc->cbfn->disable_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
        u32 r32;
        int cnt = 0;
#define BFA_SEM_SPINCNT 3000

        r32 = readl(sem_reg);

        while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
                udelay(2);
                r32 = readl(sem_reg);
        }

        if (!(r32 & 1))
                return true;

        return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
        readl(sem_reg);
        writel(1, sem_reg);
}
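
/*
 * Usage sketch, illustrative only: callers bracket access to the shared
 * resource guarded by the register with the get/release pair above, e.g.:
 *
 *      if (bfa_nw_ioc_sem_get(sem_reg)) {
 *              ... access the shared hardware resource ...
 *              bfa_nw_ioc_sem_release(sem_reg);
 *      }
 *
 * bfa_nw_ioc_sem_get() spins for a bounded time, so a false return means
 * the semaphore is still held elsewhere and the access must be retried.
 */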

/* Clear fwver hdr */
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
        u32 pgnum, loff = 0;
        int i;

        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
                writel(0, ioc->ioc_regs.smem_page_start + loff);
                loff += sizeof(u32);
        }
}

static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
        struct bfi_ioc_image_hdr fwhdr;
        u32 fwstate, r32;

        /* Spin on init semaphore to serialize. */
        r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
        while (r32 & 0x1) {
                udelay(20);
                r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
        }

        fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
        if (fwstate == BFI_IOC_UNINIT) {
                writel(1, ioc->ioc_regs.ioc_init_sem_reg);
                return;
        }

        bfa_nw_ioc_fwver_get(ioc, &fwhdr);

        if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
                writel(1, ioc->ioc_regs.ioc_init_sem_reg);
                return;
        }

        bfa_ioc_fwver_clear(ioc);
        bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
        bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);

        /*
         * Try to lock and then unlock the semaphore.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        writel(1, ioc->ioc_regs.ioc_sem_reg);

        /* Unlock init semaphore */
        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
        u32     r32;

        /**
         * First read to the semaphore register will return 0, subsequent reads
         * will return 1. Semaphore is released by writing 1 to the register
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == ~0) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
                return;
        }
        if (!(r32 & 1)) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }

        mod_timer(&ioc->sem_timer, jiffies +
                msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
        writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
        del_timer(&ioc->sem_timer);
}

/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
        u32     pss_ctl;
        int             i;
#define PSS_LMEM_INIT_TIME  10000

        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LMEM_RESET;
        pss_ctl |= __PSS_LMEM_INIT_EN;

        /*
         * i2c workaround 12.5khz clock
         */
        pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

        /**
         * wait for memory initialization to be complete
         */
        i = 0;
        do {
                pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
                i++;
        } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

        /**
         * If memory initialization is not successful, IOC timeout will catch
         * such failures.
         */
        BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

        pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
        u32     pss_ctl;

        /**
         * Take processor out of reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LPU0_RESET;

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
        u32     pss_ctl;

        /**
         * Put processors in reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
        u32     pgnum;
        u32     loff = 0;
        int             i;
        u32     *fwsig = (u32 *) fwhdr;

        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
             i++) {
                fwsig[i] =
                        swab32(readl(loff + ioc->ioc_regs.smem_page_start));
                loff += sizeof(u32);
        }
}
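
/*
 * Usage sketch, illustrative only: a caller reads the header of the firmware
 * currently resident in shared memory and checks it against the header of
 * the image linked into the driver before deciding whether to reload.
 * bfa_cb_image_get_chunk() and bfa_ioc_asic_gen() are assumed to come from
 * bfa_ioc.h:
 *
 *      struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
 *
 *      bfa_nw_ioc_fwver_get(ioc, &fwhdr);
 *      drv_fwhdr = (struct bfi_ioc_image_hdr *)
 *              bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 *      if (!bfa_ioc_fw_ver_compatible(drv_fwhdr, &fwhdr))
 *              ... firmware mismatch handling ...
 */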

static bool
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
                        struct bfi_ioc_image_hdr *fwhdr_2)
{
        int i;

        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
                if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
                        return false;
        }

        return true;
}

/* Returns TRUE if the major, minor and maintenance versions are the same.
 * If the patch, phase and build numbers also match, the MD5 checksums are
 * compared as well.
 */
static bool
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
                          struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
        if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
                return false;
        if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
                return false;
        if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
                return false;
        if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
                return false;
        if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
            drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
            drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
                return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);

        return true;
}

static bool
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
{
        if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
                return false;

        return true;
}

static bool
fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
{
        if (fwhdr->fwver.phase == 0 &&
            fwhdr->fwver.build == 0)
                return false;

        return true;
}

/* Compares two compatible images and reports whether fwhdr_to_cmp is better. */
static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
                         struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
        if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
                return BFI_IOC_IMG_VER_INCOMP;

        if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
                return BFI_IOC_IMG_VER_BETTER;
        else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
                return BFI_IOC_IMG_VER_OLD;

        /* GA takes priority over internal builds of the same patch stream.
         * At this point the major, minor, maint and patch numbers are the same.
         */
        if (fwhdr_is_ga(base_fwhdr))
                if (fwhdr_is_ga(fwhdr_to_cmp))
                        return BFI_IOC_IMG_VER_SAME;
                else
                        return BFI_IOC_IMG_VER_OLD;
        else
                if (fwhdr_is_ga(fwhdr_to_cmp))
                        return BFI_IOC_IMG_VER_BETTER;

        if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
                return BFI_IOC_IMG_VER_BETTER;
        else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
                return BFI_IOC_IMG_VER_OLD;

        if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
                return BFI_IOC_IMG_VER_BETTER;
        else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
                return BFI_IOC_IMG_VER_OLD;

        /* All version numbers are equal.
         * The MD5 check was already done as part of the compatibility check.
         */
        return BFI_IOC_IMG_VER_SAME;
}
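
/*
 * Worked example, restating the logic above: with a base header of version
 * 3.2.3 patch 0 and a candidate of 3.2.3 patch 1, the patch comparison alone
 * yields BFI_IOC_IMG_VER_BETTER. If every version field matches, the call
 * falls through to the MD5 comparison inside bfa_ioc_fw_ver_compatible(),
 * and identical images are reported as BFI_IOC_IMG_VER_SAME.
 */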

/* register definitions */
#define FLI_CMD_REG                     0x0001d000
#define FLI_WRDATA_REG                  0x0001d00c
#define FLI_RDDATA_REG                  0x0001d010
#define FLI_ADDR_REG                    0x0001d004
#define FLI_DEV_STATUS_REG              0x0001d014

#define BFA_FLASH_FIFO_SIZE             128     /* fifo size */
#define BFA_FLASH_CHECK_MAX             10000   /* max # of status check */
#define BFA_FLASH_BLOCKING_OP_MAX       1000000 /* max # of blocking op check */
#define BFA_FLASH_WIP_MASK              0x01    /* write in progress bit mask */

#define NFC_STATE_RUNNING               0x20000001
#define NFC_STATE_PAUSED                0x00004560
#define NFC_VER_VALID                   0x147

enum bfa_flash_cmd {
        BFA_FLASH_FAST_READ     = 0x0b, /* fast read */
        BFA_FLASH_WRITE_ENABLE  = 0x06, /* write enable */
        BFA_FLASH_SECTOR_ERASE  = 0xd8, /* sector erase */
        BFA_FLASH_WRITE         = 0x02, /* write */
        BFA_FLASH_READ_STATUS   = 0x05, /* read status */
};

/* hardware error definition */
enum bfa_flash_err {
        BFA_FLASH_NOT_PRESENT   = -1,   /*!< flash not present */
        BFA_FLASH_UNINIT        = -2,   /*!< flash not initialized */
        BFA_FLASH_BAD           = -3,   /*!< flash bad */
        BFA_FLASH_BUSY          = -4,   /*!< flash busy */
        BFA_FLASH_ERR_CMD_ACT   = -5,   /*!< command active never cleared */
        BFA_FLASH_ERR_FIFO_CNT  = -6,   /*!< fifo count never cleared */
        BFA_FLASH_ERR_WIP       = -7,   /*!< write-in-progress never cleared */
        BFA_FLASH_ERR_TIMEOUT   = -8,   /*!< fli timeout */
        BFA_FLASH_ERR_LEN       = -9,   /*!< invalid length */
};

/* flash command register data structure */
union bfa_flash_cmd_reg {
        struct {
#ifdef __BIG_ENDIAN
                u32     act:1;
                u32     rsv:1;
                u32     write_cnt:9;
                u32     read_cnt:9;
                u32     addr_cnt:4;
                u32     cmd:8;
#else
                u32     cmd:8;
                u32     addr_cnt:4;
                u32     read_cnt:9;
                u32     write_cnt:9;
                u32     rsv:1;
                u32     act:1;
#endif
        } r;
        u32     i;
};
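
/*
 * Layout sketch, illustrative only: on a little-endian host the bitfields
 * above pack from the least significant bit (bitfield ordering is otherwise
 * compiler-defined), so a 128-byte FAST_READ command word is assembled as
 *
 *      cmd      = 0x0b -> bits  0..7
 *      addr_cnt = 4    -> bits  8..11
 *      read_cnt = 128  -> bits 12..20
 *      act      = 1    -> bit  31
 *
 * giving cmd.i == 0x8008040b, the value bfa_flash_set_cmd() below would
 * write to FLI_CMD_REG for such a read.
 */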

/* flash device status register data structure */
union bfa_flash_dev_status_reg {
        struct {
#ifdef __BIG_ENDIAN
                u32     rsv:21;
                u32     fifo_cnt:6;
                u32     busy:1;
                u32     init_status:1;
                u32     present:1;
                u32     bad:1;
                u32     good:1;
#else
                u32     good:1;
                u32     bad:1;
                u32     present:1;
                u32     init_status:1;
                u32     busy:1;
                u32     fifo_cnt:6;
                u32     rsv:21;
#endif
        } r;
        u32     i;
};

/* flash address register data structure */
union bfa_flash_addr_reg {
        struct {
#ifdef __BIG_ENDIAN
                u32     addr:24;
                u32     dummy:8;
#else
                u32     dummy:8;
                u32     addr:24;
#endif
        } r;
        u32     i;
};

/* Flash raw private functions */
static void
bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
                  u8 rd_cnt, u8 ad_cnt, u8 op)
{
        union bfa_flash_cmd_reg cmd;

        cmd.i = 0;
        cmd.r.act = 1;
        cmd.r.write_cnt = wr_cnt;
        cmd.r.read_cnt = rd_cnt;
        cmd.r.addr_cnt = ad_cnt;
        cmd.r.cmd = op;
        writel(cmd.i, (pci_bar + FLI_CMD_REG));
}

static void
bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
{
        union bfa_flash_addr_reg addr;

        addr.r.addr = address & 0x00ffffff;
        addr.r.dummy = 0;
        writel(addr.i, (pci_bar + FLI_ADDR_REG));
}

static int
bfa_flash_cmd_act_check(void __iomem *pci_bar)
{
        union bfa_flash_cmd_reg cmd;

        cmd.i = readl(pci_bar + FLI_CMD_REG);

        if (cmd.r.act)
                return BFA_FLASH_ERR_CMD_ACT;

        return 0;
}

/* Flush FLI data fifo. */
static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
        u32 i;
        union bfa_flash_dev_status_reg dev_status;

        dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);

        if (!dev_status.r.fifo_cnt)
                return 0;

        /* fifo counter in terms of words */
        for (i = 0; i < dev_status.r.fifo_cnt; i++)
                readl(pci_bar + FLI_RDDATA_REG);

        /* Check the device status. It may take some time. */
        for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
                dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
                if (!dev_status.r.fifo_cnt)
                        break;
        }

        if (dev_status.r.fifo_cnt)
                return BFA_FLASH_ERR_FIFO_CNT;

        return 0;
}

/* Read flash status. */
static int
bfa_flash_status_read(void __iomem *pci_bar)
{
        union bfa_flash_dev_status_reg  dev_status;
        int                             status;
        u32                             ret_status;
        int                             i;

        status = bfa_flash_fifo_flush(pci_bar);
        if (status < 0)
                return status;

        bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);

        for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
                status = bfa_flash_cmd_act_check(pci_bar);
                if (!status)
                        break;
        }

        if (status)
                return status;

        dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
        if (!dev_status.r.fifo_cnt)
                return BFA_FLASH_BUSY;

        ret_status = readl(pci_bar + FLI_RDDATA_REG);
        ret_status >>= 24;

        status = bfa_flash_fifo_flush(pci_bar);
        if (status < 0)
                return status;

        return ret_status;
}

/* Start flash read operation. */
static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
                     char *buf)
{
        int status;

        /* len must be a multiple of 4 and must not exceed the fifo size */
1609        if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
1610                return BFA_FLASH_ERR_LEN;
1611
1612        /* check status */
1613        status = bfa_flash_status_read(pci_bar);
1614        if (status == BFA_FLASH_BUSY)
1615                status = bfa_flash_status_read(pci_bar);
1616
1617        if (status < 0)
1618                return status;
1619
1620        /* check if write-in-progress bit is cleared */
1621        if (status & BFA_FLASH_WIP_MASK)
1622                return BFA_FLASH_ERR_WIP;
1623
1624        bfa_flash_set_addr(pci_bar, offset);
1625
1626        bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
1627
1628        return 0;
1629}
1630
1631/* Check flash read operation. */
1632static u32
1633bfa_flash_read_check(void __iomem *pci_bar)
1634{
1635        if (bfa_flash_cmd_act_check(pci_bar))
1636                return 1;
1637
1638        return 0;
1639}
1640
1641/* End flash read operation. */
1642static void
1643bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
1644{
1645        u32 i;
1646
1647        /* read data fifo up to 32 words */
1648        for (i = 0; i < len; i += 4) {
1649                u32 w = readl(pci_bar + FLI_RDDATA_REG);
1650                *((u32 *)(buf + i)) = swab32(w);
1651        }
1652
1653        bfa_flash_fifo_flush(pci_bar);
1654}
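
/*
 * The three helpers above implement one FIFO-sized transfer:
 * bfa_flash_read_start() posts a FAST_READ of up to BFA_FLASH_FIFO_SIZE
 * bytes, bfa_flash_read_check() polls for command completion, and
 * bfa_flash_read_end() drains the data fifo into the caller's buffer.
 * bfa_flash_raw_read() below chains these steps across an arbitrary length.
 */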
1655
1656/* Perform flash raw read. */
1657
1658#define FLASH_BLOCKING_OP_MAX   500
1659#define FLASH_SEM_LOCK_REG      0x18820
1660
1661static int
1662bfa_raw_sem_get(void __iomem *bar)
1663{
1664        int     locked;
1665
1666        locked = readl(bar + FLASH_SEM_LOCK_REG);
1667
1668        return !locked;
1669}
1670
1671static enum bfa_status
1672bfa_flash_sem_get(void __iomem *bar)
1673{
1674        u32 n = FLASH_BLOCKING_OP_MAX;
1675
1676        while (!bfa_raw_sem_get(bar)) {
1677                if (--n <= 0)
1678                        return BFA_STATUS_BADFLASH;
1679                mdelay(10);
1680        }
1681        return BFA_STATUS_OK;
1682}
1683
1684static void
1685bfa_flash_sem_put(void __iomem *bar)
1686{
1687        writel(0, (bar + FLASH_SEM_LOCK_REG));
1688}
1689
1690static enum bfa_status
1691bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
1692                   u32 len)
1693{
1694        u32 n;
1695        int status;
1696        u32 off, l, s, residue, fifo_sz;
1697
1698        residue = len;
1699        off = 0;
1700        fifo_sz = BFA_FLASH_FIFO_SIZE;
1701        status = bfa_flash_sem_get(pci_bar);
1702        if (status != BFA_STATUS_OK)
1703                return status;
1704
1705        while (residue) {
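                /*
                 * Clamp this transfer at the next fifo-size boundary of
                 * the absolute flash address 's', so that a single
                 * FAST_READ never asks for more than one fifo of data.
                 */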
1706                s = offset + off;
1707                n = s / fifo_sz;
1708                l = (n + 1) * fifo_sz - s;
1709                if (l > residue)
1710                        l = residue;
1711
1712                status = bfa_flash_read_start(pci_bar, offset + off, l,
1713                                                                &buf[off]);
1714                if (status < 0) {
1715                        bfa_flash_sem_put(pci_bar);
1716                        return BFA_STATUS_FAILED;
1717                }
1718
1719                n = BFA_FLASH_BLOCKING_OP_MAX;
1720                while (bfa_flash_read_check(pci_bar)) {
1721                        if (--n <= 0) {
1722                                bfa_flash_sem_put(pci_bar);
1723                                return BFA_STATUS_FAILED;
1724                        }
1725                }
1726
1727                bfa_flash_read_end(pci_bar, l, &buf[off]);
1728
1729                residue -= l;
1730                off += l;
1731        }
1732        bfa_flash_sem_put(pci_bar);
1733
1734        return BFA_STATUS_OK;
1735}
1736
1737#define BFA_FLASH_PART_FWIMG_ADDR       0x100000 /* fw image address */
1738
1739static enum bfa_status
1740bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
1741                              u32 *fwimg)
1742{
1743        return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
1744                        BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
1745                        (char *)fwimg, BFI_FLASH_CHUNK_SZ);
1746}
1747
1748static enum bfi_ioc_img_ver_cmp
1749bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
1750                        struct bfi_ioc_image_hdr *base_fwhdr)
1751{
1752        struct bfi_ioc_image_hdr *flash_fwhdr;
1753        enum bfa_status status;
1754        u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
1755
1756        status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
1757        if (status != BFA_STATUS_OK)
1758                return BFI_IOC_IMG_VER_INCOMP;
1759
1760        flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
1761        if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
1762                return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
1763        else
1764                return BFI_IOC_IMG_VER_INCOMP;
1765}
1766
1767/*
1768 * Returns true if the driver is willing to work with the current smem f/w version.
1769 */
1770bool
1771bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1772{
1773        struct bfi_ioc_image_hdr *drv_fwhdr;
1774        enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;
1775
1776        drv_fwhdr = (struct bfi_ioc_image_hdr *)
1777                bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1778
1779        /* If smem is incompatible or old, driver should not work with it. */
1780        drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
1781        if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
1782            drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
1783                return false;
1784        }
1785
1786        /* If flash has a better f/w than smem, do not work with smem.
1787         * If smem f/w == flash f/w, work with it (it is not old/incomp here).
1788         * If flash f/w is old or incomp, work with smem iff smem f/w == drv f/w.
1789         */
1790        smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);
1791
1792        if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
1793                return false;
1794        else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
1795                return true;
1796        else
1797                return drv_smem_cmp == BFI_IOC_IMG_VER_SAME;
1799}
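
/*
 * Decision summary for bfa_nw_ioc_fwver_cmp(), as derived from the code
 * above:
 *
 *   smem vs driver     flash vs smem      work with smem?
 *   --------------     -------------      ---------------
 *   OLD or INCOMP      any                no
 *   SAME or BETTER     BETTER             no (flash will be booted)
 *   SAME or BETTER     SAME               yes
 *   SAME               OLD or INCOMP      yes
 *   BETTER             OLD or INCOMP      no
 */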
1800
1801/* Return true if current running version is valid. Firmware signature and
1802 * execution context (driver/bios) must match.
1803 */
1804static bool
1805bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1806{
1807        struct bfi_ioc_image_hdr fwhdr;
1808
1809        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1810        if (swab32(fwhdr.bootenv) != boot_env)
1811                return false;
1812
1813        return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1814}
1815
1816/* Conditionally flush any pending message from firmware at start. */
1817static void
1818bfa_ioc_msgflush(struct bfa_ioc *ioc)
1819{
1820        u32     r32;
1821
1822        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1823        if (r32)
1824                writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1825}
1826
1827static void
1828bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1829{
1830        enum bfi_ioc_state ioc_fwstate;
1831        bool fwvalid;
1832        u32 boot_env;
1833
1834        ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1835
1836        if (force)
1837                ioc_fwstate = BFI_IOC_UNINIT;
1838
1839        boot_env = BFI_FWBOOT_ENV_OS;
1840
1841        /**
1842         * check if firmware is valid
1843         */
1844        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1845                false : bfa_ioc_fwver_valid(ioc, boot_env);
1846
1847        if (!fwvalid) {
1848                if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
1849                                                                BFA_STATUS_OK)
1850                        bfa_ioc_poll_fwinit(ioc);
1851
1852                return;
1853        }
1854
1855        /**
1856         * If hardware initialization is in progress (initialized by other IOC),
1857         * just wait for an initialization completion interrupt.
1858         */
1859        if (ioc_fwstate == BFI_IOC_INITING) {
1860                bfa_ioc_poll_fwinit(ioc);
1861                return;
1862        }
1863
1864        /**
1865         * If IOC function is disabled and firmware version is same,
1866         * just re-enable IOC.
1867         */
1868        if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1869                /**
1870                 * When using MSI-X any pending firmware ready event should
1871                 * be flushed. Otherwise MSI-X interrupts are not delivered.
1872                 */
1873                bfa_ioc_msgflush(ioc);
1874                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1875                return;
1876        }
1877
1878        /**
1879         * Initialize the h/w for any other states.
1880         */
1881        if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
1882                                                        BFA_STATUS_OK)
1883                bfa_ioc_poll_fwinit(ioc);
1884}
1885
1886void
1887bfa_nw_ioc_timeout(struct bfa_ioc *ioc)
1888{
1889        bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1890}
1891
1892static void
1893bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
1894{
1895        u32 *msgp = (u32 *) ioc_msg;
1896        u32 i;
1897
1898        BUG_ON(len > BFI_IOC_MSGLEN_MAX);
1899
1900        /*
1901         * first write msg to mailbox registers
1902         */
1903        for (i = 0; i < len / sizeof(u32); i++)
1904                writel(cpu_to_le32(msgp[i]),
1905                              ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1906
1907        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1908                writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1909
1910        /*
1911         * write 1 to mailbox CMD to trigger LPU event
1912         */
1913        writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1914        (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1915}
1916
1917static void
1918bfa_ioc_send_enable(struct bfa_ioc *ioc)
1919{
1920        struct bfi_ioc_ctrl_req enable_req;
1921
1922        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1923                    bfa_ioc_portid(ioc));
1924        enable_req.clscode = htons(ioc->clscode);
1925        enable_req.rsvd = htons(0);
1926        /* u32 seconds overflows in the year 2106 */
1927        enable_req.tv_sec = htonl(ktime_get_real_seconds());
1928        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1929}
1930
1931static void
1932bfa_ioc_send_disable(struct bfa_ioc *ioc)
1933{
1934        struct bfi_ioc_ctrl_req disable_req;
1935
1936        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1937                    bfa_ioc_portid(ioc));
1938        disable_req.clscode = htons(ioc->clscode);
1939        disable_req.rsvd = htons(0);
1940        /* u32 seconds overflows in the year 2106 */
1941        disable_req.tv_sec = htonl(ktime_get_real_seconds());
1942        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1943}
1944
1945static void
1946bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1947{
1948        struct bfi_ioc_getattr_req attr_req;
1949
1950        bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1951                    bfa_ioc_portid(ioc));
1952        bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1953        bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1954}
1955
1956void
1957bfa_nw_ioc_hb_check(struct bfa_ioc *ioc)
1958{
1959        u32 hb_count;
1960
1961        hb_count = readl(ioc->ioc_regs.heartbeat);
1962        if (ioc->hb_count == hb_count) {
1963                bfa_ioc_recover(ioc);
1964                return;
1965        }
1966        ioc->hb_count = hb_count;
1968
1969        bfa_ioc_mbox_poll(ioc);
1970        mod_timer(&ioc->hb_timer, jiffies +
1971                msecs_to_jiffies(BFA_IOC_HB_TOV));
1972}
1973
1974static void
1975bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1976{
1977        ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1978        mod_timer(&ioc->hb_timer, jiffies +
1979                msecs_to_jiffies(BFA_IOC_HB_TOV));
1980}
1981
1982static void
1983bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1984{
1985        del_timer(&ioc->hb_timer);
1986}
1987
1988/* Initiate a full firmware download. */
1989static enum bfa_status
1990bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1991                    u32 boot_env)
1992{
1993        u32 *fwimg;
1994        u32 pgnum;
1995        u32 loff = 0;
1996        u32 chunkno = 0;
1997        u32 i;
1998        u32 asicmode;
1999        u32 fwimg_size;
2000        u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
2001        enum bfa_status status;
2002
2003        if (boot_env == BFI_FWBOOT_ENV_OS &&
2004            boot_type == BFI_FWBOOT_TYPE_FLASH) {
2005                fwimg_size = BFI_FLASH_IMAGE_SZ / sizeof(u32);
2006
2007                status = bfa_nw_ioc_flash_img_get_chnk(ioc,
2008                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
2009                if (status != BFA_STATUS_OK)
2010                        return status;
2011
2012                fwimg = fwimg_buf;
2013        } else {
2014                fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
2015                fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
2016                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
2017        }
2018
2019        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
2020
2021        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2022
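        /*
         * Stream the image into smem one word at a time: refill fwimg on
         * each chunk-boundary crossing, and bump the smem page register
         * whenever the page offset (loff) wraps to zero.
         */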
2023        for (i = 0; i < fwimg_size; i++) {
2024                if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
2025                        chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
2026                        if (boot_env == BFI_FWBOOT_ENV_OS &&
2027                            boot_type == BFI_FWBOOT_TYPE_FLASH) {
2028                                status = bfa_nw_ioc_flash_img_get_chnk(ioc,
2029                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
2030                                        fwimg_buf);
2031                                if (status != BFA_STATUS_OK)
2032                                        return status;
2033
2034                                fwimg = fwimg_buf;
2035                        } else {
2036                                fwimg = bfa_cb_image_get_chunk(
2037                                        bfa_ioc_asic_gen(ioc),
2038                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
2039                        }
2040                }
2041
2042                /**
2043                 * write smem
2044                 */
2045                writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
2046                       ioc->ioc_regs.smem_page_start + loff);
2047
2048                loff += sizeof(u32);
2049
2050                /**
2051                 * handle page offset wrap around
2052                 */
2053                loff = PSS_SMEM_PGOFF(loff);
2054                if (loff == 0) {
2055                        pgnum++;
2056                        writel(pgnum,
2057                                      ioc->ioc_regs.host_page_num_fn);
2058                }
2059        }
2060
2061        writel(bfa_ioc_smem_pgnum(ioc, 0),
2062                      ioc->ioc_regs.host_page_num_fn);
2063
2064        /*
2065         * Set boot type, environment and device mode at the end.
2066         */
2067        if (boot_env == BFI_FWBOOT_ENV_OS &&
2068            boot_type == BFI_FWBOOT_TYPE_FLASH) {
2069                boot_type = BFI_FWBOOT_TYPE_NORMAL;
2070        }
2071        asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
2072                                        ioc->port0_mode, ioc->port1_mode);
2073        writel(asicmode, ((ioc->ioc_regs.smem_page_start)
2074                        + BFI_FWBOOT_DEVMODE_OFF));
2075        writel(boot_type, ((ioc->ioc_regs.smem_page_start)
2076                        + (BFI_FWBOOT_TYPE_OFF)));
2077        writel(boot_env, ((ioc->ioc_regs.smem_page_start)
2078                        + (BFI_FWBOOT_ENV_OFF)));
2079        return BFA_STATUS_OK;
2080}
2081
2082static void
2083bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
2084{
2085        bfa_ioc_hwinit(ioc, force);
2086}
2087
2088/* BFA ioc enable reply by firmware */
2089static void
2090bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
2091                        u8 cap_bm)
2092{
2093        struct bfa_iocpf *iocpf = &ioc->iocpf;
2094
2095        ioc->port_mode = ioc->port_mode_cfg = port_mode;
2096        ioc->ad_cap_bm = cap_bm;
2097        bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2098}
2099
2100/* Update BFA configuration from firmware configuration. */
2101static void
2102bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
2103{
2104        struct bfi_ioc_attr *attr = ioc->attr;
2105
2106        attr->adapter_prop  = ntohl(attr->adapter_prop);
2107        attr->card_type     = ntohl(attr->card_type);
2108        attr->maxfrsize     = ntohs(attr->maxfrsize);
2109
2110        bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
2111}
2112
2113/* Attach time initialization of mbox logic. */
2114static void
2115bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
2116{
2117        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2118        int     mc;
2119
2120        INIT_LIST_HEAD(&mod->cmd_q);
2121        for (mc = 0; mc < BFI_MC_MAX; mc++) {
2122                mod->mbhdlr[mc].cbfn = NULL;
2123                mod->mbhdlr[mc].cbarg = ioc->bfa;
2124        }
2125}
2126
2127/* Mbox poll timer -- restarts any pending mailbox requests. */
2128static void
2129bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
2130{
2131        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2132        struct bfa_mbox_cmd *cmd;
2133        bfa_mbox_cmd_cbfn_t cbfn;
2134        void *cbarg;
2135        u32 stat;
2136
2137        /**
2138         * If no command pending, do nothing
2139         */
2140        if (list_empty(&mod->cmd_q))
2141                return;
2142
2143        /**
2144         * If previous command is not yet fetched by firmware, do nothing
2145         */
2146        stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2147        if (stat)
2148                return;
2149
2150        /**
2151         * Enqueue command to firmware.
2152         */
2153        cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
2154        list_del(&cmd->qe);
2155        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2156
2157        /**
2158         * Give a callback to the client, indicating that the command is sent
2159         */
2160        if (cmd->cbfn) {
2161                cbfn = cmd->cbfn;
2162                cbarg = cmd->cbarg;
2163                cmd->cbfn = NULL;
2164                cbfn(cbarg);
2165        }
2166}
2167
2168/* Cleanup any pending requests. */
2169static void
2170bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
2171{
2172        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2173        struct bfa_mbox_cmd *cmd;
2174
2175        while (!list_empty(&mod->cmd_q)) {
2176                cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
2177                list_del(&cmd->qe);
2178        }
2179}
2180
2181/**
2182 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
2183 *
2184 * @ioc:     memory for IOC
2185 * @tbuf:    app memory to store data from smem
2186 * @soff:    smem offset
2187 * @sz:      size of smem in bytes
2188 */
2189static int
2190bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
2191{
2192        u32 pgnum, loff, r32;
2193        int i, len;
2194        u32 *buf = tbuf;
2195
2196        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2197        loff = PSS_SMEM_PGOFF(soff);
2198
2199        /*
2200         * Hold semaphore to serialize pll init and fwtrc.
2201         */
2202        if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
2203                return 1;
2204
2205        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2206
2207        len = sz / sizeof(u32);
2208        for (i = 0; i < len; i++) {
2209                r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start));
2210                buf[i] = be32_to_cpu(r32);
2211                loff += sizeof(u32);
2212
2213                /**
2214                 * handle page offset wrap around
2215                 */
2216                loff = PSS_SMEM_PGOFF(loff);
2217                if (loff == 0) {
2218                        pgnum++;
2219                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2220                }
2221        }
2222
2223        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2224               ioc->ioc_regs.host_page_num_fn);
2225
2226        /*
2227         * release semaphore
2228         */
2229        readl(ioc->ioc_regs.ioc_init_sem_reg);
2230        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2231        return 0;
2232}
2233
2234/* Retrieve saved firmware trace from a prior IOC failure. */
2235int
2236bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
2237{
2238        u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
2239        int tlen, status = 0;
2240
2241        tlen = *trclen;
2242        if (tlen > BNA_DBG_FWTRC_LEN)
2243                tlen = BNA_DBG_FWTRC_LEN;
2244
2245        status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
2246        *trclen = tlen;
2247        return status;
2248}
2249
2250/* Save firmware trace if configured. */
2251static void
2252bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
2253{
2254        int tlen;
2255
2256        if (ioc->dbg_fwsave_once) {
2257                ioc->dbg_fwsave_once = false;
2258                if (ioc->dbg_fwsave_len) {
2259                        tlen = ioc->dbg_fwsave_len;
2260                        bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2261                }
2262        }
2263}
2264
2265/* Retrieve saved firmware trace from a prior IOC failure. */
2266int
2267bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
2268{
2269        int tlen;
2270
2271        if (ioc->dbg_fwsave_len == 0)
2272                return BFA_STATUS_ENOFSAVE;
2273
2274        tlen = *trclen;
2275        if (tlen > ioc->dbg_fwsave_len)
2276                tlen = ioc->dbg_fwsave_len;
2277
2278        memcpy(trcdata, ioc->dbg_fwsave, tlen);
2279        *trclen = tlen;
2280        return BFA_STATUS_OK;
2281}
2282
2283static void
2284bfa_ioc_fail_notify(struct bfa_ioc *ioc)
2285{
2286        /**
2287         * Notify driver and common modules registered for notification.
2288         */
2289        ioc->cbfn->hbfail_cbfn(ioc->bfa);
2290        bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2291        bfa_nw_ioc_debug_save_ftrc(ioc);
2292}
2293
2294/* IOCPF to IOC interface */
2295static void
2296bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
2297{
2298        bfa_fsm_send_event(ioc, IOC_E_ENABLED);
2299}
2300
2301static void
2302bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
2303{
2304        bfa_fsm_send_event(ioc, IOC_E_DISABLED);
2305}
2306
2307static void
2308bfa_ioc_pf_failed(struct bfa_ioc *ioc)
2309{
2310        bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
2311}
2312
2313static void
2314bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
2315{
2316        bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
2317}
2318
2319static void
2320bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
2321{
2322        /**
2323         * Provide enable completion callback and AEN notification.
2324         */
2325        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2326}
2327
2328/* IOC public */
2329static enum bfa_status
2330bfa_ioc_pll_init(struct bfa_ioc *ioc)
2331{
2332        /*
2333         *  Hold semaphore so that nobody can access the chip during init.
2334         */
2335        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2336
2337        bfa_ioc_pll_init_asic(ioc);
2338
2339        ioc->pllinit = true;
2340
2341        /* Initialize LMEM */
2342        bfa_ioc_lmem_init(ioc);
2343
2344        /*
2345         *  release semaphore.
2346         */
2347        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
2348
2349        return BFA_STATUS_OK;
2350}
2351
2352/* Interface used by diag module to do firmware boot with memory test
2353 * as the entry vector.
2354 */
2355static enum bfa_status
2356bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
2357                u32 boot_env)
2358{
2359        struct bfi_ioc_image_hdr *drv_fwhdr;
2360        enum bfa_status status;
2361
2362        bfa_ioc_stats(ioc, ioc_boots);
2363        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2364                return BFA_STATUS_FAILED;
2365        if (boot_env == BFI_FWBOOT_ENV_OS &&
2366            boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2367                drv_fwhdr = (struct bfi_ioc_image_hdr *)
2368                        bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2369                /* Boot from flash iff the flash f/w is better than the
2370                 * driver f/w; otherwise push the driver's firmware.
2371                 */
2372                if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2373                        BFI_IOC_IMG_VER_BETTER)
2374                        boot_type = BFI_FWBOOT_TYPE_FLASH;
2375        }
2376
2377        /**
2378         * Initialize IOC state of all functions on a chip reset.
2379         */
2380        if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2381                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2382                bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2383        } else {
2384                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2385                bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2386        }
2387
2388        bfa_ioc_msgflush(ioc);
2389        status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2390        if (status == BFA_STATUS_OK)
2391                bfa_ioc_lpu_start(ioc);
2392        else
2393                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2394
2395        return status;
2396}
2397
2398/* Enable/disable IOC failure auto recovery. */
2399void
2400bfa_nw_ioc_auto_recover(bool auto_recover)
2401{
2402        bfa_nw_auto_recover = auto_recover;
2403}
2404
2405static bool
2406bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
2407{
2408        u32     *msgp = mbmsg;
2409        u32     r32;
2410        int             i;
2411
2412        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2413        if ((r32 & 1) == 0)
2414                return false;
2415
2416        /**
2417         * read the MBOX msg
2418         */
2419        for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2420             i++) {
2421                r32 = readl(ioc->ioc_regs.lpu_mbox +
2422                                   i * sizeof(u32));
2423                msgp[i] = htonl(r32);
2424        }
2425
2426        /**
2427         * turn off mailbox interrupt by clearing mailbox status
2428         */
2429        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2430        readl(ioc->ioc_regs.lpu_mbox_cmd);
2431
2432        return true;
2433}
2434
2435static void
2436bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
2437{
2438        union bfi_ioc_i2h_msg_u *msg;
2439        struct bfa_iocpf *iocpf = &ioc->iocpf;
2440
2441        msg = (union bfi_ioc_i2h_msg_u *) m;
2442
2443        bfa_ioc_stats(ioc, ioc_isrs);
2444
2445        switch (msg->mh.msg_id) {
2446        case BFI_IOC_I2H_HBEAT:
2447                break;
2448
2449        case BFI_IOC_I2H_ENABLE_REPLY:
2450                bfa_ioc_enable_reply(ioc,
2451                        (enum bfa_mode)msg->fw_event.port_mode,
2452                        msg->fw_event.cap_bm);
2453                break;
2454
2455        case BFI_IOC_I2H_DISABLE_REPLY:
2456                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2457                break;
2458
2459        case BFI_IOC_I2H_GETATTR_REPLY:
2460                bfa_ioc_getattr_reply(ioc);
2461                break;
2462
2463        default:
2464                BUG_ON(1);
2465        }
2466}
2467
2468/**
2469 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
2470 *
2471 * @ioc:        memory for IOC
2472 * @bfa:        driver instance structure
2473 * @cbfn:       callback function
2474 */
2475void
2476bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
2477{
2478        ioc->bfa        = bfa;
2479        ioc->cbfn       = cbfn;
2480        ioc->fcmode     = false;
2481        ioc->pllinit    = false;
2482        ioc->dbg_fwsave_once = true;
2483        ioc->iocpf.ioc  = ioc;
2484
2485        bfa_ioc_mbox_attach(ioc);
2486        INIT_LIST_HEAD(&ioc->notify_q);
2487
2488        bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2489        bfa_fsm_send_event(ioc, IOC_E_RESET);
2490}
2491
2492/* Driver detach time IOC cleanup. */
2493void
2494bfa_nw_ioc_detach(struct bfa_ioc *ioc)
2495{
2496        bfa_fsm_send_event(ioc, IOC_E_DETACH);
2497
2498        /* Done with detach, empty the notify_q. */
2499        INIT_LIST_HEAD(&ioc->notify_q);
2500}
2501
2502/**
2503 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
2504 *
2505 * @ioc:        memory for IOC
2506 * @pcidev:     PCI device information for this IOC
2507 * @clscode:    class code
2508 */
2509void
2510bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
2511                 enum bfi_pcifn_class clscode)
2512{
2513        ioc->clscode    = clscode;
2514        ioc->pcidev     = *pcidev;
2515
2516        /**
2517         * Initialize IOC and device personality
2518         */
2519        ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2520        ioc->asic_mode  = BFI_ASIC_MODE_FC;
2521
2522        switch (pcidev->device_id) {
2523        case PCI_DEVICE_ID_BROCADE_CT:
2524                ioc->asic_gen = BFI_ASIC_GEN_CT;
2525                ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2526                ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2527                ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2528                ioc->ad_cap_bm = BFA_CM_CNA;
2529                break;
2530
2531        case BFA_PCI_DEVICE_ID_CT2:
2532                ioc->asic_gen = BFI_ASIC_GEN_CT2;
2533                if (clscode == BFI_PCIFN_CLASS_FC &&
2534                        pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2535                        ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2536                        ioc->fcmode = true;
2537                        ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2538                        ioc->ad_cap_bm = BFA_CM_HBA;
2539                } else {
2540                        ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2541                        ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2542                        if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2543                                ioc->port_mode =
2544                                ioc->port_mode_cfg = BFA_MODE_CNA;
2545                                ioc->ad_cap_bm = BFA_CM_CNA;
2546                        } else {
2547                                ioc->port_mode =
2548                                ioc->port_mode_cfg = BFA_MODE_NIC;
2549                                ioc->ad_cap_bm = BFA_CM_NIC;
2550                        }
2551                }
2552                break;
2553
2554        default:
2555                BUG_ON(1);
2556        }
2557
2558        /**
2559         * Set asic specific interfaces.
2560         */
2561        if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2562                bfa_nw_ioc_set_ct_hwif(ioc);
2563        else {
2564                WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2565                bfa_nw_ioc_set_ct2_hwif(ioc);
2566                bfa_nw_ioc_ct2_poweron(ioc);
2567        }
2568
2569        bfa_ioc_map_port(ioc);
2570        bfa_ioc_reg_init(ioc);
2571}
2572
2573/**
2574 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
2575 *
2576 * @ioc:        memory for IOC
2577 * @dm_kva:     kernel virtual address of IOC dma memory
2578 * @dm_pa:      physical address of IOC dma memory
2579 */
2580void
2581bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc,  u8 *dm_kva, u64 dm_pa)
2582{
2583        /**
2584         * dma memory for firmware attribute
2585         */
2586        ioc->attr_dma.kva = dm_kva;
2587        ioc->attr_dma.pa = dm_pa;
2588        ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2589}
2590
2591/* Return size of dma memory required. */
2592u32
2593bfa_nw_ioc_meminfo(void)
2594{
2595        return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
2596}
2597
2598void
2599bfa_nw_ioc_enable(struct bfa_ioc *ioc)
2600{
2601        bfa_ioc_stats(ioc, ioc_enables);
2602        ioc->dbg_fwsave_once = true;
2603
2604        bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2605}
2606
2607void
2608bfa_nw_ioc_disable(struct bfa_ioc *ioc)
2609{
2610        bfa_ioc_stats(ioc, ioc_disables);
2611        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2612}
2613
2614/* Initialize memory for saving firmware trace. */
2615void
2616bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2617{
2618        ioc->dbg_fwsave = dbg_fwsave;
2619        ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
2620}
2621
2622static u32
2623bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
2624{
2625        return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2626}
2627
2628/* Register mailbox message handler function, to be called by common modules */
2629void
2630bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2631                    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2632{
2633        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2634
2635        mod->mbhdlr[mc].cbfn    = cbfn;
2636        mod->mbhdlr[mc].cbarg = cbarg;
2637}
2638
2639/**
2640 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
2641 *
2642 * @ioc:        IOC instance
2643 * @cmd:        Mailbox command
2644 * @cbfn:       callback function
2645 * @cbarg:      arguments to callback
2646 * Queues the command (returns true) if the mailbox is busy or a command
2647 * is pending; else sends it immediately (returns false). Caller serializes.
2648 */
2649bool
2650bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2651                        bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2652{
2653        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2654        u32                     stat;
2655
2656        cmd->cbfn = cbfn;
2657        cmd->cbarg = cbarg;
2658
2659        /**
2660         * If a previous command is pending, queue new command
2661         */
2662        if (!list_empty(&mod->cmd_q)) {
2663                list_add_tail(&cmd->qe, &mod->cmd_q);
2664                return true;
2665        }
2666
2667        /**
2668         * If mailbox is busy, queue command for poll timer
2669         */
2670        stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2671        if (stat) {
2672                list_add_tail(&cmd->qe, &mod->cmd_q);
2673                return true;
2674        }
2675
2676        /**
2677         * mailbox is free -- queue command to firmware
2678         */
2679        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2680
2681        return false;
2682}
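
/*
 * Illustrative sketch only (not part of the driver): a hypothetical
 * client posts a command and uses the return value to tell whether it
 * was handed to firmware immediately (false) or queued behind a busy
 * mailbox (true); in the latter case the callback fires when the
 * command is eventually sent by bfa_ioc_mbox_poll().
 */
#if 0
static void
example_cmd_sent(void *cbarg)
{
        /* the deferred command has now been written to the mailbox */
}

static void
example_post(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
{
        if (bfa_nw_ioc_mbox_queue(ioc, cmd, example_cmd_sent, ioc->bfa))
                pr_debug("command queued; waiting for mailbox\n");
        else
                pr_debug("command sent inline; callback will not run\n");
}
#endif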
2683
2684/* Handle mailbox interrupts */
2685void
2686bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
2687{
2688        struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2689        struct bfi_mbmsg m;
2690        int                             mc;
2691
2692        if (bfa_ioc_msgget(ioc, &m)) {
2693                /**
2694                 * Treat IOC message class as special.
2695                 */
2696                mc = m.mh.msg_class;
2697                if (mc == BFI_MC_IOC) {
2698                        bfa_ioc_isr(ioc, &m);
2699                        return;
2700                }
2701
2702                if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2703                        return;
2704
2705                mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2706        }
2707
2708        bfa_ioc_lpu_read_stat(ioc);
2709
2710        /**
2711         * Try to send pending mailbox commands
2712         */
2713        bfa_ioc_mbox_poll(ioc);
2714}
2715
2716void
2717bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
2718{
2719        bfa_ioc_stats(ioc, ioc_hbfails);
2720        bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2721        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2722}
2723
2724/* return true if IOC is disabled */
2725bool
2726bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
2727{
2728        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2729                bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2730}
2731
2732/* return true if IOC is operational */
2733bool
2734bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
2735{
2736        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2737}
2738
2739/* Add to IOC heartbeat failure notification queue. To be used by common
2740 * modules such as cee, port, diag.
2741 */
2742void
2743bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
2744                        struct bfa_ioc_notify *notify)
2745{
2746        list_add_tail(&notify->qe, &ioc->notify_q);
2747}
2748
2749#define BFA_MFG_NAME "QLogic"
2750static void
2751bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2752                         struct bfa_adapter_attr *ad_attr)
2753{
2754        struct bfi_ioc_attr *ioc_attr;
2755
2756        ioc_attr = ioc->attr;
2757
2758        bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2759        bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2760        bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2761        bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2762        memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2763                      sizeof(struct bfa_mfg_vpd));
2764
2765        ad_attr->nports = bfa_ioc_get_nports(ioc);
2766        ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2767
2768        bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2769        /* For now, model descr uses same model string */
2770        bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2771
2772        ad_attr->card_type = ioc_attr->card_type;
2773        ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2774
2775        if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2776                ad_attr->prototype = 1;
2777        else
2778                ad_attr->prototype = 0;
2779
2780        ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2781        bfa_nw_ioc_get_mac(ioc, ad_attr->mac);
2782
2783        ad_attr->pcie_gen = ioc_attr->pcie_gen;
2784        ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2785        ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2786        ad_attr->asic_rev = ioc_attr->asic_rev;
2787
2788        bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2789}
2790
2791static enum bfa_ioc_type
2792bfa_ioc_get_type(struct bfa_ioc *ioc)
2793{
2794        if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2795                return BFA_IOC_TYPE_LL;
2796
2797        BUG_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2798
2799        return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2800                ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2801}
2802
2803static void
2804bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2805{
2806        memcpy(serial_num, ioc->attr->brcd_serialnum,
2807               BFA_ADAPTER_SERIAL_NUM_LEN);
2809}
2810
2811static void
2812bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2813{
2814        memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2815}
2816
2817static void
2818bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2819{
2820        BUG_ON(!chip_rev);
2821
2822        memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2823
2824        chip_rev[0] = 'R';
2825        chip_rev[1] = 'e';
2826        chip_rev[2] = 'v';
2827        chip_rev[3] = '-';
2828        chip_rev[4] = ioc->attr->asic_rev;
2829        chip_rev[5] = '\0';
2830}
2831
2832static void
2833bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2834{
2835        memcpy(optrom_ver, ioc->attr->optrom_version,
2836                      BFA_VERSION_LEN);
2837}
2838
2839static void
2840bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2841{
2842        strscpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2843}
2844
2845static void
2846bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2847{
2848        struct bfi_ioc_attr *ioc_attr;
2849
2850        BUG_ON(!model);
2851        memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2852
2853        ioc_attr = ioc->attr;
2854
2855        snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2856                BFA_MFG_NAME, ioc_attr->card_type);
2857}
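
/* For example, a card_type of 1860 yields the model string "QLogic-1860". */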
2858
2859static enum bfa_ioc_state
2860bfa_ioc_get_state(struct bfa_ioc *ioc)
2861{
2862        enum bfa_iocpf_state iocpf_st;
2863        enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2864
2865        if (ioc_st == BFA_IOC_ENABLING ||
2866                ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2867
2868                iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2869
2870                switch (iocpf_st) {
2871                case BFA_IOCPF_SEMWAIT:
2872                        ioc_st = BFA_IOC_SEMWAIT;
2873                        break;
2874
2875                case BFA_IOCPF_HWINIT:
2876                        ioc_st = BFA_IOC_HWINIT;
2877                        break;
2878
2879                case BFA_IOCPF_FWMISMATCH:
2880                        ioc_st = BFA_IOC_FWMISMATCH;
2881                        break;
2882
2883                case BFA_IOCPF_FAIL:
2884                        ioc_st = BFA_IOC_FAIL;
2885                        break;
2886
2887                case BFA_IOCPF_INITFAIL:
2888                        ioc_st = BFA_IOC_INITFAIL;
2889                        break;
2890
2891                default:
2892                        break;
2893                }
2894        }
2895        return ioc_st;
2896}
2897
2898void
2899bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
2900{
2901        memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2902
2903        ioc_attr->state = bfa_ioc_get_state(ioc);
2904        ioc_attr->port_id = bfa_ioc_portid(ioc);
2905        ioc_attr->port_mode = ioc->port_mode;
2906
2907        ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2908        ioc_attr->cap_bm = ioc->ad_cap_bm;
2909
2910        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2911
2912        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2913
2914        ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2915        ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2916        ioc_attr->def_fn = bfa_ioc_is_default(ioc);
2917        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2918}
2919
2920/* WWN public */
2921static u64
2922bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2923{
2924        return ioc->attr->pwwn;
2925}
2926
2927void
2928bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
2929{
2930        ether_addr_copy(mac, ioc->attr->mac);
2931}
2932
2933/* Firmware failure detected. Start recovery actions. */
2934static void
2935bfa_ioc_recover(struct bfa_ioc *ioc)
2936{
2937        pr_crit("Heart Beat of IOC has failed\n");
2938        bfa_ioc_stats(ioc, ioc_hbfails);
2939        bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
2940        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2941}
2942
2943/* BFA IOC PF private functions */
2944
2945static void
2946bfa_iocpf_enable(struct bfa_ioc *ioc)
2947{
2948        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2949}
2950
2951static void
2952bfa_iocpf_disable(struct bfa_ioc *ioc)
2953{
2954        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2955}
2956
2957static void
2958bfa_iocpf_fail(struct bfa_ioc *ioc)
2959{
2960        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2961}
2962
2963static void
2964bfa_iocpf_initfail(struct bfa_ioc *ioc)
2965{
2966        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2967}
2968
2969static void
2970bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2971{
2972        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2973}
2974
2975static void
2976bfa_iocpf_stop(struct bfa_ioc *ioc)
2977{
2978        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2979}
2980
2981void
2982bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
2983{
2984        enum bfa_iocpf_state iocpf_st;
2985
2986        iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2987
2988        if (iocpf_st == BFA_IOCPF_HWINIT)
2989                bfa_ioc_poll_fwinit(ioc);
2990        else
2991                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2992}
2993
2994void
2995bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc)
2996{
2997        bfa_ioc_hw_sem_get(ioc);
2998}
2999
3000static void
3001bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
3002{
3003        u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3004
3005        if (fwstate == BFI_IOC_DISABLED) {
3006                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3007                return;
3008        }
3009
3010        if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
3011                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3012        } else {
3013                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3014                mod_timer(&ioc->iocpf_timer, jiffies +
3015                        msecs_to_jiffies(BFA_IOC_POLL_TOV));
3016        }
3017}
3018
3019/*
3020 *      Flash module specific
3021 */
3022
3023/*
3024 * The flash DMA buffer should be big enough to hold both the MFG
3025 * block and the asic block (64k) at the same time, and should be 2k
3026 * aligned so that no write segment crosses a sector boundary.
3027 */
3028#define BFA_FLASH_SEG_SZ        2048
3029#define BFA_FLASH_DMA_BUF_SZ    \
3030        roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
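
/*
 * For instance, if sizeof(struct bfa_mfg_block) were 256 bytes, the
 * buffer would round up from 0x10100 to 0x10800 bytes (33 2k segments).
 */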
3031
3032static void
3033bfa_flash_cb(struct bfa_flash *flash)
3034{
3035        flash->op_busy = 0;
3036        if (flash->cbfn)
3037                flash->cbfn(flash->cbarg, flash->status);
3038}
3039
3040static void
3041bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
3042{
3043        struct bfa_flash *flash = cbarg;
3044
3045        switch (event) {
3046        case BFA_IOC_E_DISABLED:
3047        case BFA_IOC_E_FAILED:
3048                if (flash->op_busy) {
3049                        flash->status = BFA_STATUS_IOC_FAILURE;
3050                        flash->cbfn(flash->cbarg, flash->status);
3051                        flash->op_busy = 0;
3052                }
3053                break;
3054        default:
3055                break;
3056        }
3057}
3058
3059/*
3060 * Send flash write request.
3061 */
3062static void
3063bfa_flash_write_send(struct bfa_flash *flash)
3064{
3065        struct bfi_flash_write_req *msg =
3066                        (struct bfi_flash_write_req *) flash->mb.msg;
3067        u32     len;
3068
3069        msg->type = cpu_to_be32(flash->type);
3070        msg->instance = flash->instance;
3071        msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
3072        len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3073               flash->residue : BFA_FLASH_DMA_BUF_SZ;
3074        msg->length = cpu_to_be32(len);
3075
3076        /* indicate if it's the last msg of the whole write operation */
3077        msg->last = (len == flash->residue) ? 1 : 0;
3078
3079        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
3080                    bfa_ioc_portid(flash->ioc));
3081        bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3082        memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
3083        bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
3084
3085        flash->residue -= len;
3086        flash->offset += len;
3087}
3088
3089/**
3090 * bfa_flash_read_send - Send flash read request.
3091 *
3092 * @cbarg: callback argument
3093 */
3094static void
3095bfa_flash_read_send(void *cbarg)
3096{
3097        struct bfa_flash *flash = cbarg;
3098        struct bfi_flash_read_req *msg =
3099                        (struct bfi_flash_read_req *) flash->mb.msg;
3100        u32     len;
3101
3102        msg->type = cpu_to_be32(flash->type);
3103        msg->instance = flash->instance;
3104        msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
3105        len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
3106               flash->residue : BFA_FLASH_DMA_BUF_SZ;
3107        msg->length = cpu_to_be32(len);
3108        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
3109                    bfa_ioc_portid(flash->ioc));
3110        bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
3111        bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
3112}
3113
3114/**
3115 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
3116 *
3117 * @flasharg: flash structure
3118 * @msg: message structure
3119 */
3120static void
3121bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
3122{
3123        struct bfa_flash *flash = flasharg;
3124        u32     status;
3125
3126        union {
3127                struct bfi_flash_query_rsp *query;
3128                struct bfi_flash_write_rsp *write;
3129                struct bfi_flash_read_rsp *read;
3130                struct bfi_mbmsg   *msg;
3131        } m;
3132
3133        m.msg = msg;
3134
3135        /* receiving response after ioc failure */
3136        if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
3137                return;
3138
3139        switch (msg->mh.msg_id) {
3140        case BFI_FLASH_I2H_QUERY_RSP:
3141                status = be32_to_cpu(m.query->status);
3142                if (status == BFA_STATUS_OK) {
3143                        u32     i;
3144                        struct bfa_flash_attr *attr, *f;
3145
3146                        attr = (struct bfa_flash_attr *) flash->ubuf;
3147                        f = (struct bfa_flash_attr *) flash->dbuf_kva;
3148                        attr->status = be32_to_cpu(f->status);
3149                        attr->npart = be32_to_cpu(f->npart);
3150                        for (i = 0; i < attr->npart; i++) {
3151                                attr->part[i].part_type =
3152                                        be32_to_cpu(f->part[i].part_type);
3153                                attr->part[i].part_instance =
3154                                        be32_to_cpu(f->part[i].part_instance);
3155                                attr->part[i].part_off =
3156                                        be32_to_cpu(f->part[i].part_off);
3157                                attr->part[i].part_size =
3158                                        be32_to_cpu(f->part[i].part_size);
3159                                attr->part[i].part_len =
3160                                        be32_to_cpu(f->part[i].part_len);
3161                                attr->part[i].part_status =
3162                                        be32_to_cpu(f->part[i].part_status);
3163                        }
3164                }
3165                flash->status = status;
3166                bfa_flash_cb(flash);
3167                break;
3168        case BFI_FLASH_I2H_WRITE_RSP:
3169                status = be32_to_cpu(m.write->status);
3170                if (status != BFA_STATUS_OK || flash->residue == 0) {
3171                        flash->status = status;
3172                        bfa_flash_cb(flash);
3173                } else
3174                        bfa_flash_write_send(flash);
3175                break;
3176        case BFI_FLASH_I2H_READ_RSP:
3177                status = be32_to_cpu(m.read->status);
3178                if (status != BFA_STATUS_OK) {
3179                        flash->status = status;
3180                        bfa_flash_cb(flash);
3181                } else {
3182                        u32 len = be32_to_cpu(m.read->length);
3183                        memcpy(flash->ubuf + flash->offset,
3184                               flash->dbuf_kva, len);
3185                        flash->residue -= len;
3186                        flash->offset += len;
3187                        if (flash->residue == 0) {
3188                                flash->status = status;
3189                                bfa_flash_cb(flash);
3190                        } else
3191                                bfa_flash_read_send(flash);
3192                }
3193                break;
3194        case BFI_FLASH_I2H_BOOT_VER_RSP:
3195        case BFI_FLASH_I2H_EVENT:
3196                break;
3197        default:
3198                WARN_ON(1);
3199        }
3200}
3201
3202/*
3203 * Flash memory info API.
3204 */
3205u32
3206bfa_nw_flash_meminfo(void)
3207{
3208        return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
3209}
3210
3211/**
3212 * bfa_nw_flash_attach - Flash attach API.
3213 *
3214 * @flash: flash structure
3215 * @ioc: ioc structure
3216 * @dev: device structure
3217 */
3218void
3219bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
3220{
3221        flash->ioc = ioc;
3222        flash->cbfn = NULL;
3223        flash->cbarg = NULL;
3224        flash->op_busy = 0;
3225
3226        bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
3227        bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
3228        list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
3229}
3230
3231/**
3232 * bfa_nw_flash_memclaim - Claim memory for flash
3233 *
3234 * @flash: flash structure
3235 * @dm_kva: pointer to virtual memory address
3236 * @dm_pa: physical memory address
3237 */
3238void
3239bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
3240{
3241        flash->dbuf_kva = dm_kva;
3242        flash->dbuf_pa = dm_pa;
3243        memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
3244        dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
3245        dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
3246}
3247
3248/**
3249 * bfa_nw_flash_get_attr - Get flash attribute.
3250 *
3251 * @flash: flash structure
3252 * @attr: flash attribute structure
3253 * @cbfn: callback function
3254 * @cbarg: callback argument
3255 *
3256 * Return status.
3257 */
3258enum bfa_status
3259bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
3260                      bfa_cb_flash cbfn, void *cbarg)
3261{
3262        struct bfi_flash_query_req *msg =
3263                        (struct bfi_flash_query_req *) flash->mb.msg;
3264
3265        if (!bfa_nw_ioc_is_operational(flash->ioc))
3266                return BFA_STATUS_IOC_NON_OP;
3267
3268        if (flash->op_busy)
3269                return BFA_STATUS_DEVBUSY;
3270
3271        flash->op_busy = 1;
3272        flash->cbfn = cbfn;
3273        flash->cbarg = cbarg;
3274        flash->ubuf = (u8 *) attr;
3275
3276        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
3277                    bfa_ioc_portid(flash->ioc));
3278        bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
3279        bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
3280
3281        return BFA_STATUS_OK;
3282}
3283
3284/**
3285 * bfa_nw_flash_update_part - Update flash partition.
3286 *
3287 * @flash: flash structure
3288 * @type: flash partition type
3289 * @instance: flash partition instance
3290 * @buf: update data buffer
3291 * @len: data buffer length
3292 * @offset: offset relative to the partition starting address
3293 * @cbfn: callback function
3294 * @cbarg: callback argument
3295 *
3296 * Return status.
3297 */
3298enum bfa_status
3299bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
3300                         void *buf, u32 len, u32 offset,
3301                         bfa_cb_flash cbfn, void *cbarg)
3302{
3303        if (!bfa_nw_ioc_is_operational(flash->ioc))
3304                return BFA_STATUS_IOC_NON_OP;
3305
3306        /*
3307         * 'len' must be a non-zero multiple of 4 bytes (word boundary)
3308         */
3309        if (!len || (len & 0x03))
3310                return BFA_STATUS_FLASH_BAD_LEN;
3311
3312        if (type == BFA_FLASH_PART_MFG)
3313                return BFA_STATUS_EINVAL;
3314
3315        if (flash->op_busy)
3316                return BFA_STATUS_DEVBUSY;
3317
3318        flash->op_busy = 1;
3319        flash->cbfn = cbfn;
3320        flash->cbarg = cbarg;
3321        flash->type = type;
3322        flash->instance = instance;
3323        flash->residue = len;
3324        flash->offset = 0;
3325        flash->addr_off = offset;
3326        flash->ubuf = buf;
3327
3328        bfa_flash_write_send(flash);
3329
3330        return BFA_STATUS_OK;
3331}
3332
3333/**
3334 * bfa_nw_flash_read_part - Read flash partition.
3335 *
3336 * @flash: flash structure
3337 * @type: flash partition type
3338 * @instance: flash partition instance
3339 * @buf: read data buffer
3340 * @len: data buffer length
3341 * @offset: offset relative to the partition starting address
3342 * @cbfn: callback function
3343 * @cbarg: callback argument
3344 *
3345 * Return status.
3346 */
3347enum bfa_status
3348bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
3349                       void *buf, u32 len, u32 offset,
3350                       bfa_cb_flash cbfn, void *cbarg)
3351{
3352        if (!bfa_nw_ioc_is_operational(flash->ioc))
3353                return BFA_STATUS_IOC_NON_OP;
3354
3355        /*
3356         * 'len' must be in word (4-byte) boundary
3357         */
3358        if (!len || (len & 0x03))
3359                return BFA_STATUS_FLASH_BAD_LEN;
3360
3361        if (flash->op_busy)
3362                return BFA_STATUS_DEVBUSY;
3363
3364        flash->op_busy = 1;
3365        flash->cbfn = cbfn;
3366        flash->cbarg = cbarg;
3367        flash->type = type;
3368        flash->instance = instance;
3369        flash->residue = len;
3370        flash->offset = 0;
3371        flash->addr_off = offset;
3372        flash->ubuf = buf;
3373
3374        bfa_flash_read_send(flash);
3375
3376        return BFA_STATUS_OK;
3377}
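
/*
 * Illustrative sketch only (not part of the driver): a hypothetical
 * caller wires the flash module to an operational IOC and reads from a
 * partition. BFA_FLASH_PART_FWIMG is assumed to be a valid partition
 * type from bfa_defs.h; DMA buffer setup and error handling are elided.
 */
#if 0
static void
example_flash_read_done(void *cbarg, enum bfa_status status)
{
        /* on BFA_STATUS_OK the requested bytes are in the caller's buffer */
}

static enum bfa_status
example_flash_read(struct bfa_flash *flash, struct bfa_ioc *ioc,
                   u8 *dma_kva, u64 dma_pa, void *buf, u32 len)
{
        bfa_nw_flash_attach(flash, ioc, NULL);
        bfa_nw_flash_memclaim(flash, dma_kva, dma_pa);

        /* len must be a non-zero multiple of 4 bytes */
        return bfa_nw_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
                                      buf, len, 0,
                                      example_flash_read_done, NULL);
}
#endif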
3378