linux/drivers/scsi/bfa/bfa_ioc.c
/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <bfa_ioc.h>
#include <bfa_fwimg_priv.h>
#include <bfa_trcmod_priv.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_ioc.h>
#include <bfi/bfi_ctreg.h>
#include <aen/bfa_aen_ioc.h>
#include <aen/bfa_aen.h>
#include <log/bfa_log_hal.h>
#include <defs/bfa_defs_pci.h>

BFA_TRC_FILE(HAL, IOC);

/**
 * IOC local definitions
 */
#define BFA_IOC_TOV             2000    /* msecs */
#define BFA_IOC_HB_TOV          1000    /* msecs */
#define BFA_IOC_HB_FAIL_MAX     4
#define BFA_IOC_HWINIT_MAX      2
#define BFA_IOC_FWIMG_MINSZ     (16 * 1024)
#define BFA_IOC_TOV_RECOVER     (BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \
                                + BFA_IOC_TOV)

#define bfa_ioc_timer_start(__ioc)                                      \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define BFA_DBG_FWTRC_ENTS      (BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN                                       \
        (BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +        \
         (sizeof(struct bfa_trc_mod_s) -                        \
          BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)  (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
#define bfa_ioc_stats(_ioc, _stats)     ((_ioc)->stats._stats++)

#define BFA_FLASH_CHUNK_NO(off)         ((off) / BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_FLASH_OFFSET_IN_CHUNK(off)  ((off) % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_FLASH_CHUNK_ADDR(chunkno)   ((chunkno) * BFI_FLASH_CHUNK_SZ_WORDS)
bfa_boolean_t   bfa_auto_recover = BFA_FALSE;

/*
 * forward declarations
 */
static void     bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
                                 enum bfa_ioc_aen_event event);
static void     bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void     bfa_ioc_timeout(void *ioc);
static void     bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void     bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void     bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void     bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void     bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void     bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void     bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void     bfa_ioc_recover(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc);
static void     bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc);
static void     bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void     bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);

/**
 *  bfa_ioc_sm
 */

/**
 * IOC state machine events
 */
enum ioc_event {
        IOC_E_ENABLE = 1,       /*  IOC enable request */
        IOC_E_DISABLE = 2,      /*  IOC disable request */
        IOC_E_TIMEOUT = 3,      /*  f/w response timeout */
        IOC_E_FWREADY = 4,      /*  f/w initialization done */
        IOC_E_FWRSP_GETATTR = 5,        /*  IOC get attribute response */
        IOC_E_FWRSP_ENABLE = 6, /*  enable f/w response */
        IOC_E_FWRSP_DISABLE = 7,        /*  disable f/w response */
        IOC_E_HBFAIL = 8,       /*  heartbeat failure */
        IOC_E_HWERROR = 9,      /*  hardware error interrupt */
        IOC_E_SEMLOCKED = 10,   /*  h/w semaphore is locked */
        IOC_E_DETACH = 11,      /*  driver detach cleanup */
};

bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
        {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
        {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
        {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
        {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
        {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
        {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
        {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
        {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
        {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
        ioc->retry_count = 0;
        ioc->auto_recover = bfa_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_disable_comp(ioc);
                break;

        case IOC_E_DETACH:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        ioc->retry_count = 0;
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
                } else {
                        bfa_ioc_hw_sem_release(ioc);
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
                }
                break;

        case IOC_E_DISABLE:
                bfa_ioc_disable_comp(ioc);
                /*
                 * fall through
                 */

        case IOC_E_DETACH:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        case IOC_E_FWREADY:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc_s *ioc)
{
        /**
         * Provide enable completion callback and AEN notification only once.
         */
        if (ioc->retry_count == 0) {
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
        }
        ioc->retry_count++;
        bfa_ioc_timer_start(ioc);
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_TIMEOUT:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_disable_comp(ioc);
                /*
                 * fall through
                 */

        case IOC_E_DETACH:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        case IOC_E_FWREADY:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_SEMLOCKED:
                ioc->retry_count = 0;
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_hw_sem_get_cancel(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

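/**
 * Entry actions: start the IOC timer and kick off h/w initialization.
 */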
static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_timer_start(ioc);
        bfa_ioc_reset(ioc, BFA_FALSE);
}

/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_FWREADY:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_HWERROR:
                bfa_ioc_timer_stop(ioc);
                /*
                 * fall through
                 */

        case IOC_E_TIMEOUT:
                ioc->retry_count++;
                if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
                        bfa_ioc_timer_start(ioc);
                        bfa_ioc_reset(ioc, BFA_TRUE);
                        break;
                }

                bfa_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_hw_sem_release(ioc);
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

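/**
 * Entry actions: start the IOC timer and send an enable request to firmware.
 */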
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_timer_start(ioc);
        bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_FWRSP_ENABLE:
                bfa_ioc_timer_stop(ioc);
                bfa_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_HWERROR:
                bfa_ioc_timer_stop(ioc);
                /*
                 * fall through
                 */

        case IOC_E_TIMEOUT:
                ioc->retry_count++;
                if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
                        bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
                                      BFI_IOC_UNINIT);
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
                        break;
                }

                bfa_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_timer_stop(ioc);
                bfa_ioc_hw_sem_release(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_FWREADY:
                bfa_ioc_send_enable(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

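/**
 * Entry actions: start the IOC timer and request IOC attributes from
 * firmware.
 */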
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_timer_start(ioc);
        bfa_ioc_send_getattr(ioc);
}

/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;

        case IOC_E_HWERROR:
                bfa_ioc_timer_stop(ioc);
                /*
                 * fall through
                 */

        case IOC_E_TIMEOUT:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

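/**
 * Entry actions: notify enable completion, start heartbeat monitoring
 * and post an enable AEN.
 */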
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_hb_monitor(ioc);
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

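/**
 * IOC is operational.
 */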
static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_ioc_hb_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_HWERROR:
        case IOC_E_FWREADY:
                /**
                 * Hard error or IOC recovery by other function.
                 * Treat it same as heartbeat failure.
                 */
                bfa_ioc_hb_stop(ioc);
                /*
                 * !!! fall through !!!
                 */

        case IOC_E_HBFAIL:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
        bfa_ioc_timer_start(ioc);
        bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_HWERROR:
        case IOC_E_FWRSP_DISABLE:
                bfa_ioc_timer_stop(ioc);
                /*
                 * !!! fall through !!!
                 */

        case IOC_E_TIMEOUT:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_disable_comp(ioc);
}

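/**
 * IOC is disabled.
 */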
static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_FWREADY:
                break;

        case IOC_E_DETACH:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
        bfa_ioc_timer_start(ioc);
}

/**
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_DISABLE:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_DETACH:
                bfa_ioc_timer_stop(ioc);
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        case IOC_E_TIMEOUT:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc_s *ioc)
{
        struct list_head *qe;
        struct bfa_ioc_hbfail_notify_s *notify;

        /**
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(ioc);
        bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL);

        if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
                bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
                /*
                 * Wait for halt to take effect
                 */
                bfa_reg_read(ioc->ioc_regs.ll_halt);
        }

        /**
         * Notify driver and common modules registered for notification.
         */
        ioc->cbfn->hbfail_cbfn(ioc->bfa);
        list_for_each(qe, &ioc->hb_notify_q) {
                notify = (struct bfa_ioc_hbfail_notify_s *)qe;
                notify->cbfn(notify->cbarg);
        }

        /**
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_hbfail(ioc);
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);

        /**
         * Trigger auto-recovery after a delay.
         */
        if (ioc->auto_recover) {
                bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer,
                                bfa_ioc_timeout, ioc, BFA_IOC_TOV_RECOVER);
        }
}

/**
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                if (ioc->auto_recover)
                        bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_TIMEOUT:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
                break;

        case IOC_E_FWREADY:
                /**
                 * Recovery is already initiated by other function.
                 */
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


/**
 *  bfa_ioc_pvt BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
        struct list_head *qe;
        struct bfa_ioc_hbfail_notify_s *notify;

        ioc->cbfn->disable_cbfn(ioc->bfa);

        /**
         * Notify common modules registered for notification.
         */
        list_for_each(qe, &ioc->hb_notify_q) {
                notify = (struct bfa_ioc_hbfail_notify_s *)qe;
                notify->cbfn(notify->cbarg);
        }
}

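/**
 * Semaphore timer expiry -- retry acquiring the h/w semaphore.
 */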
static void
bfa_ioc_sem_timeout(void *ioc_arg)
{
        struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

        bfa_ioc_hw_sem_get(ioc);
}

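/**
 * Busy-wait (bounded by BFA_SEM_SPINCNT) until the usage tracking
 * semaphore is acquired.
 */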
static void
bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc)
{
        u32        r32;
        int             cnt = 0;
#define BFA_SEM_SPINCNT 1000

        do {
                r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg);
                cnt++;
                if (cnt > BFA_SEM_SPINCNT)
                        break;
        } while (r32 != 0);
        bfa_assert(cnt < BFA_SEM_SPINCNT);
}

static void
bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc)
{
        bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
        u32        r32;

        /**
         * First read to the semaphore register will return 0, subsequent
         * reads will return 1. Semaphore is released by writing 1 to the
         * register.
         */
        r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == 0) {
                bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
                return;
        }

        bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
                        ioc, BFA_IOC_TOV);
}

static void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
        bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
        bfa_timer_stop(&ioc->sem_timer);
}

/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
        u32        pss_ctl;
        int             i;
#define PSS_LMEM_INIT_TIME  10000

        pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LMEM_RESET;
        pss_ctl |= __PSS_LMEM_INIT_EN;
        pss_ctl |= __PSS_I2C_CLK_DIV(3UL); /* i2c workaround 12.5khz clock */
        bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

        /**
         * wait for memory initialization to be complete
         */
        i = 0;
        do {
                pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
                i++;
        } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

        /**
         * If memory initialization is not successful, IOC timeout will catch
         * such failures.
         */
        bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
        bfa_trc(ioc, pss_ctl);

        pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
        bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
        u32        pss_ctl;

        /**
         * Take processor out of reset.
         */
        pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LPU0_RESET;

        bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
        u32        pss_ctl;

        /**
         * Put processors in reset.
         */
        pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

        bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

/**
 * Get driver and firmware versions.
 */
static void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
        u32        pgnum, pgoff;
        u32        loff = 0;
        int             i;
        u32       *fwsig = (u32 *) fwhdr;

        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
        pgoff = bfa_ioc_smem_pgoff(ioc, loff);
        bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
             i++) {
                fwsig[i] = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
                loff += sizeof(u32);
        }
}

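/**
 * Return the firmware image chunk for this adapter type (Catapult or
 * Crossbow).
 */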
static u32 *
bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
{
        if (ioc->ctdev)
                return bfi_image_ct_get_chunk(off);
        return bfi_image_cb_get_chunk(off);
}

static u32
bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc)
{
        return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size;
}

/**
 * Returns TRUE if same.
 */
static bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
        struct bfi_ioc_image_hdr_s *drv_fwhdr;
        int             i;

        drv_fwhdr =
                (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
                if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
                        bfa_trc(ioc, i);
                        bfa_trc(ioc, fwhdr->md5sum[i]);
                        bfa_trc(ioc, drv_fwhdr->md5sum[i]);
                        return BFA_FALSE;
                }
        }

        bfa_trc(ioc, fwhdr->md5sum[0]);
        return BFA_TRUE;
}

/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

        /**
         * If bios/efi boot (flash based) -- return true
         */
        if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
                return BFA_TRUE;

        bfa_ioc_fwver_get(ioc, &fwhdr);
        drv_fwhdr =
                (struct bfi_ioc_image_hdr_s *)bfa_ioc_fwimg_get_chunk(ioc, 0);

        if (fwhdr.signature != drv_fwhdr->signature) {
                bfa_trc(ioc, fwhdr.signature);
                bfa_trc(ioc, drv_fwhdr->signature);
                return BFA_FALSE;
        }

        if (fwhdr.exec != drv_fwhdr->exec) {
                bfa_trc(ioc, fwhdr.exec);
                bfa_trc(ioc, drv_fwhdr->exec);
                return BFA_FALSE;
        }

        return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32        usecnt;
        struct bfi_ioc_image_hdr_s fwhdr;

        /**
         * Firmware match check is relevant only for CNA.
         */
        if (!ioc->cna)
                return BFA_TRUE;

        /**
         * If bios boot (flash based) -- do not increment usage count
         */
        if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
                return BFA_TRUE;

        bfa_ioc_usage_sem_get(ioc);
        usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);

        /**
         * If usage count is 0, always return TRUE.
         */
        if (usecnt == 0) {
                bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
                bfa_ioc_usage_sem_release(ioc);
                bfa_trc(ioc, usecnt);
                return BFA_TRUE;
        }

        ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
        bfa_trc(ioc, ioc_fwstate);

        /**
         * Use count cannot be non-zero while the chip is in
         * uninitialized state.
         */
        bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);

        /**
         * Check if another driver with a different firmware is active
         */
        bfa_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
                bfa_ioc_usage_sem_release(ioc);
                bfa_trc(ioc, usecnt);
                return BFA_FALSE;
        }

        /**
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
        bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
        bfa_ioc_usage_sem_release(ioc);
        bfa_trc(ioc, usecnt);
        return BFA_TRUE;
}

static void
bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc)
{
        u32        usecnt;

        /**
         * Firmware lock is relevant only for CNA.
         * If bios boot (flash based) -- do not decrement usage count
         */
        if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ))
                return;

        /**
         * decrement usage count
         */
        bfa_ioc_usage_sem_get(ioc);
        usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
        bfa_assert(usecnt > 0);

        usecnt--;
        bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
        bfa_trc(ioc, usecnt);

        bfa_ioc_usage_sem_release(ioc);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
        u32        r32;

        r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
        if (r32)
                bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}


static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
        enum bfi_ioc_state ioc_fwstate;
        bfa_boolean_t   fwvalid;

        ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

        if (force)
                ioc_fwstate = BFI_IOC_UNINIT;

        bfa_trc(ioc, ioc_fwstate);

        /**
         * check if firmware is valid
         */
        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
                        BFA_FALSE : bfa_ioc_fwver_valid(ioc);

        if (!fwvalid) {
                bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
                return;
        }

        /**
         * If hardware initialization is in progress (initialized by other IOC),
         * just wait for an initialization completion interrupt.
         */
        if (ioc_fwstate == BFI_IOC_INITING) {
                bfa_trc(ioc, ioc_fwstate);
                ioc->cbfn->reset_cbfn(ioc->bfa);
                return;
        }

        /**
         * If IOC function is disabled and firmware version is same,
         * just re-enable IOC.
         */
        if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
                bfa_trc(ioc, ioc_fwstate);

                /**
                 * When using MSI-X any pending firmware ready event should
                 * be flushed. Otherwise MSI-X interrupts are not delivered.
                 */
                bfa_ioc_msgflush(ioc);
                ioc->cbfn->reset_cbfn(ioc->bfa);
                bfa_fsm_send_event(ioc, IOC_E_FWREADY);
                return;
        }

        /**
         * Initialize the h/w for any other states.
         */
        bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}

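/**
 * IOC timer expiry -- forward a timeout event to the state machine.
 */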
static void
bfa_ioc_timeout(void *ioc_arg)
{
        struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;

        bfa_trc(ioc, 0);
        bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

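/**
 * Write a message to the host-to-LPU mailbox registers and ring the
 * doorbell to trigger an LPU event.
 */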
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
        u32       *msgp = (u32 *) ioc_msg;
        u32        i;

        bfa_trc(ioc, msgp[0]);
        bfa_trc(ioc, len);

        bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

        /*
         * first write msg to mailbox registers
         */
        for (i = 0; i < len / sizeof(u32); i++)
                bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
                              bfa_os_wtole(msgp[i]));

        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
                bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

        /*
         * write 1 to mailbox CMD to trigger LPU event
         */
        bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
        (void)bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}

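/**
 * Send an IOC enable request to firmware.
 */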
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_ctrl_req_s enable_req;

        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
        enable_req.ioc_class = ioc->ioc_mc;
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_ctrl_req_s disable_req;

        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
                    bfa_ioc_portid(ioc));
        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_getattr_req_s attr_req;

        bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
                    bfa_ioc_portid(ioc));
        bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
        bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

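/**
 * Heartbeat timer expiry -- count a failure when the firmware heartbeat
 * counter has not advanced, and recover once BFA_IOC_HB_FAIL_MAX
 * consecutive failures are seen.
 */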
static void
bfa_ioc_hb_check(void *cbarg)
{
        struct bfa_ioc_s *ioc = cbarg;
        u32        hb_count;

        hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
        if (ioc->hb_count == hb_count) {
                ioc->hb_fail++;
        } else {
                ioc->hb_count = hb_count;
                ioc->hb_fail = 0;
        }

        if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) {
                bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
                ioc->hb_fail = 0;
                bfa_ioc_recover(ioc);
                return;
        }

        bfa_ioc_mbox_poll(ioc);
        bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
                        BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
        ioc->hb_fail = 0;
        ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
        bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
                        BFA_IOC_HB_TOV);
}

static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
        bfa_timer_stop(&ioc->ioc_timer);
}

/**
 * Host to LPU mailbox message addresses
 */
static struct {
        u32        hfn_mbox, lpu_mbox, hfn_pgn;
} iocreg_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct {
        u32        hfn, lpu;
} iocreg_mbcmd_p0[] = {
        { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
        { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
        { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
        { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct {
        u32        hfn, lpu;
} iocreg_mbcmd_p1[] = {
        { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
        { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
        { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
        { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Shared IRQ handling in INTX mode
 */
static struct {
        u32        isr, msk;
} iocreg_shirq_next[] = {
        { HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK },
        { HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK },
        { HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK },
        { HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK },
};

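/**
 * Initialize IOC register addresses based on PCI function and port id.
 */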
static void
bfa_ioc_reg_init(struct bfa_ioc_s *ioc)
{
        bfa_os_addr_t   rb;
        int             pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
        } else {
                ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
                ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
                ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
        }

        /**
         * Shared IRQ handling in INTX mode
         */
        ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr;
        ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk;

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
        ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
        ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
        ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
        ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);

        /**
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
        if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT)
                ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
}

/**
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
                    u32 boot_param)
{
        u32       *fwimg;
        u32        pgnum, pgoff;
        u32        loff = 0;
        u32        chunkno = 0;
        u32        i;

        /**
         * Initialize LMEM first before code download
         */
        bfa_ioc_lmem_init(ioc);

        /**
         * Flash based firmware boot
         */
        bfa_trc(ioc, bfa_ioc_fwimg_get_size(ioc));
        if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
                boot_type = BFI_BOOT_TYPE_FLASH;
        fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);
        fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type);
        fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] =
                bfa_os_swap32(boot_param);

        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
        pgoff = bfa_ioc_smem_pgoff(ioc, loff);

        bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

        for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {

                if (BFA_FLASH_CHUNK_NO(i) != chunkno) {
                        chunkno = BFA_FLASH_CHUNK_NO(i);
                        fwimg = bfa_ioc_fwimg_get_chunk(ioc,
                                        BFA_FLASH_CHUNK_ADDR(chunkno));
                }

                /**
                 * write smem
                 */
                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
                              fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]);

                loff += sizeof(u32);

                /**
                 * handle page offset wrap around
                 */
                loff = PSS_SMEM_PGOFF(loff);
                if (loff == 0) {
                        pgnum++;
                        bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
                }
        }

        bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
                      bfa_ioc_smem_pgnum(ioc, 0));
}

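/**
 * Reset the IOC by re-running h/w initialization.
 */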
static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
        bfa_ioc_hwinit(ioc, force);
}

/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
        struct bfi_ioc_attr_s *attr = ioc->attr;

        attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop);
        attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize);

        bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
        int             mc;

        INIT_LIST_HEAD(&mod->cmd_q);
        for (mc = 0; mc < BFI_MC_MAX; mc++) {
                mod->mbhdlr[mc].cbfn = NULL;
                mod->mbhdlr[mc].cbarg = ioc->bfa;
        }
}

/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
        struct bfa_mbox_cmd_s *cmd;
        u32        stat;

        /**
         * If no command pending, do nothing
         */
        if (list_empty(&mod->cmd_q))
                return;

        /**
         * If previous command is not yet fetched by firmware, do nothing
         */
        stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
        if (stat)
                return;

        /**
         * Enqueue command to firmware.
         */
        bfa_q_deq(&mod->cmd_q, &cmd);
        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
        struct bfa_mbox_cmd_s *cmd;

        while (!list_empty(&mod->cmd_q))
                bfa_q_deq(&mod->cmd_q, &cmd);
}

/**
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_map_port(struct bfa_ioc_s *ioc)
{
        bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
        u32        r32;

        /**
         * For crossbow, port id is same as pci function.
         */
        if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) {
                ioc->port_id = bfa_ioc_pcifn(ioc);
                return;
        }

        /**
         * For catapult, base port id on personality register and IOC type
         */
        r32 = bfa_reg_read(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

        bfa_trc(ioc, bfa_ioc_pcifn(ioc));
        bfa_trc(ioc, ioc->port_id);
}



/**
 *  bfa_ioc_public
 */

/**
 * Set interrupt mode for a function: INTX or MSIX
 */
void
bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
        bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
        u32        r32, mode;

        r32 = bfa_reg_read(rb + FNC_PERS_REG);
        bfa_trc(ioc, r32);

        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /**
         * If already in desired mode, do not change anything
         */
        if (!msix && mode)
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        bfa_trc(ioc, r32);

        bfa_reg_write(rb + FNC_PERS_REG, r32);
}

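/**
 * Initialize the chip PLLs (SCLK/FCLK) and, on Catapult, start memory
 * BIST.
 */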
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
        bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
        u32        pll_sclk, pll_fclk, r32;

        if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
                pll_sclk =
                        __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
                        __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) |
                        __APP_PLL_312_JITLMT0_1(3U) |
                        __APP_PLL_312_CNTLMT0_1(1U);
                pll_fclk =
                        __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
                        __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) |
                        __APP_PLL_425_JITLMT0_1(3U) |
                        __APP_PLL_425_CNTLMT0_1(1U);

                /**
                 * For catapult, choose operational mode FC/FCoE
                 */
                if (ioc->fcmode) {
                        bfa_reg_write((rb + OP_MODE), 0);
                        bfa_reg_write((rb + ETH_MAC_SER_REG),
                                      __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2
                                      | __APP_EMS_CHANNEL_SEL);
                } else {
                        ioc->pllinit = BFA_TRUE;
                        bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
                        bfa_reg_write((rb + ETH_MAC_SER_REG),
                                      __APP_EMS_REFCKBUFEN1);
                }
        } else {
                pll_sclk =
                        __APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
                        __APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) |
                        __APP_PLL_312_CNTLMT0_1(3U);
                pll_fclk =
                        __APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
                        __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
                        __APP_PLL_425_JITLMT0_1(3U) |
                        __APP_PLL_425_CNTLMT0_1(3U);
        }

        bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
        bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);

        bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);

        bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
                      __APP_PLL_312_LOGIC_SOFT_RESET);
        bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
                      __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET);
        bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
                      __APP_PLL_425_LOGIC_SOFT_RESET);
        bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
                      __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET);
        bfa_os_udelay(2);
        bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
                      __APP_PLL_312_LOGIC_SOFT_RESET);
        bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
                      __APP_PLL_425_LOGIC_SOFT_RESET);

        bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
                      pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET);
        bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
                      pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET);

        /**
         * Wait for PLLs to lock.
         */
        bfa_os_udelay(2000);
        bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);

        bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
        bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);

        if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
                bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
                bfa_os_udelay(1000);
                r32 = bfa_reg_read((rb + MBIST_STAT_REG));
                bfa_trc(ioc, r32);
        }

        return BFA_STATUS_OK;
}

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param)
{
        bfa_os_addr_t   rb;

        bfa_ioc_stats(ioc, ioc_boots);

        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
                return;

        /**
         * Initialize IOC state of all functions on a chip reset.
         */
        rb = ioc->pcidev.pci_bar_kva;
        if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
                bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
                bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
        } else {
                bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
                bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
        }

        bfa_ioc_download_fw(ioc, boot_type, boot_param);

        /**
         * Enable interrupts just before starting LPU
         */
        ioc->cbfn->reset_cbfn(ioc->bfa);
        bfa_ioc_lpu_start(ioc);
}

/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
        bfa_auto_recover = auto_recover;
}


bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

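/**
 * Read a firmware message from the LPU-to-host mailbox and clear the
 * mailbox interrupt.
 */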
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
        u32       *msgp = mbmsg;
        u32        r32;
        int             i;

        /**
         * read the MBOX msg
         */
        for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
             i++) {
                r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
                                   i * sizeof(u32));
                msgp[i] = bfa_os_htonl(r32);
        }

        /**
         * turn off mailbox interrupt by clearing mailbox status
         */
        bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
        bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}

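/**
 * Handler for IOC class mailbox messages from firmware.
 */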
1679void
1680bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
1681{
1682        union bfi_ioc_i2h_msg_u *msg;
1683
1684        msg = (union bfi_ioc_i2h_msg_u *)m;
1685
1686        bfa_ioc_stats(ioc, ioc_isrs);
1687
1688        switch (msg->mh.msg_id) {
1689        case BFI_IOC_I2H_HBEAT:
1690                break;
1691
1692        case BFI_IOC_I2H_READY_EVENT:
1693                bfa_fsm_send_event(ioc, IOC_E_FWREADY);
1694                break;
1695
1696        case BFI_IOC_I2H_ENABLE_REPLY:
1697                bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
1698                break;
1699
1700        case BFI_IOC_I2H_DISABLE_REPLY:
1701                bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
1702                break;
1703
1704        case BFI_IOC_I2H_GETATTR_REPLY:
1705                bfa_ioc_getattr_reply(ioc);
1706                break;
1707
1708        default:
1709                bfa_trc(ioc, msg->mh.msg_id);
1710                bfa_assert(0);
1711        }
1712}
1713
1714/**
1715 * IOC attach time initialization and setup.
1716 *
1717 * @param[in]   ioc     memory for IOC
1718 * @param[in]   bfa     driver instance structure
1719 * @param[in]   trcmod  kernel trace module
1720 * @param[in]   aen     kernel aen event module
1721 * @param[in]   logm    kernel logging module
1722 */
1723void
1724bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
1725               struct bfa_timer_mod_s *timer_mod, struct bfa_trc_mod_s *trcmod,
1726               struct bfa_aen_s *aen, struct bfa_log_mod_s *logm)
1727{
1728        ioc->bfa = bfa;
1729        ioc->cbfn = cbfn;
1730        ioc->timer_mod = timer_mod;
1731        ioc->trcmod = trcmod;
1732        ioc->aen = aen;
1733        ioc->logm = logm;
1734        ioc->fcmode = BFA_FALSE;
1735        ioc->pllinit = BFA_FALSE;
1736        ioc->dbg_fwsave_once = BFA_TRUE;
1737
1738        bfa_ioc_mbox_attach(ioc);
1739        INIT_LIST_HEAD(&ioc->hb_notify_q);
1740
1741        bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
1742}
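
/*
 * Illustrative bring-up sketch (editor's sketch, not part of the driver):
 * the ordering implied by the comments in this file is attach first, then
 * PCI and DMA memory setup, then enable. All variable names here are
 * hypothetical.
 *
 *        bfa_ioc_attach(ioc, bfa, &cbfn, timer_mod, trcmod, aen, logm);
 *        bfa_ioc_pci_init(ioc, pcidev, BFI_MC_IOCFC);
 *        bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *        bfa_ioc_enable(ioc);
 */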
1743
1744/**
1745 * Driver detach time IOC cleanup.
1746 */
1747void
1748bfa_ioc_detach(struct bfa_ioc_s *ioc)
1749{
1750        bfa_fsm_send_event(ioc, IOC_E_DETACH);
1751}
1752
1753/**
1754 * Setup IOC PCI properties.
1755 *
1756 * @param[in]   pcidev  PCI device information for this IOC
1757 */
1758void
1759bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
1760                 enum bfi_mclass mc)
1761{
1762        ioc->ioc_mc = mc;
1763        ioc->pcidev = *pcidev;
1764        ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
1765        ioc->cna = ioc->ctdev && !ioc->fcmode;
1766
1767        bfa_ioc_map_port(ioc);
1768        bfa_ioc_reg_init(ioc);
1769}
1770
1771/**
1772 * Initialize IOC dma memory
1773 *
1774 * @param[in]   dm_kva  kernel virtual address of IOC dma memory
1775 * @param[in]   dm_pa   physical address of IOC dma memory
1776 */
1777void
1778bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
1779{
1780        /**
1781         * dma memory for firmware attribute
1782         */
1783        ioc->attr_dma.kva = dm_kva;
1784        ioc->attr_dma.pa = dm_pa;
1785        ioc->attr = (struct bfi_ioc_attr_s *)dm_kva;
1786}
1787
1788/**
1789 * Return size of dma memory required.
1790 */
1791u32
1792bfa_ioc_meminfo(void)
1793{
1794        return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
1795}
1796
1797void
1798bfa_ioc_enable(struct bfa_ioc_s *ioc)
1799{
1800        bfa_ioc_stats(ioc, ioc_enables);
1801        ioc->dbg_fwsave_once = BFA_TRUE;
1802
1803        bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1804}
1805
1806void
1807bfa_ioc_disable(struct bfa_ioc_s *ioc)
1808{
1809        bfa_ioc_stats(ioc, ioc_disables);
1810        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1811}
1812
/**
 * Returns the size of memory required for saving the firmware trace after
 * a crash. The driver must call this interface to size the memory needed
 * for automatic saving of the firmware trace, and should call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to set up this
 * trace memory.
 */
1820int
1821bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
1822{
        return auto_recover ? BFA_DBG_FWTRC_LEN : 0;
1824}
1825
/**
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
1830void
1831bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
1832{
1833        bfa_assert(ioc->auto_recover);
1834        ioc->dbg_fwsave = dbg_fwsave;
1835        ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
1836}
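
/*
 * Illustrative allocation sketch (editor's sketch, not part of the
 * driver), following the comments above: size the trace buffer with
 * bfa_ioc_debug_trcsz(), allocate it, and claim it right after
 * bfa_ioc_attach(). Names are hypothetical.
 *
 *        int trclen = bfa_ioc_debug_trcsz(ioc->auto_recover);
 *        void *fwsave = trclen ? kzalloc(trclen, GFP_KERNEL) : NULL;
 *
 *        if (fwsave)
 *                bfa_ioc_debug_memclaim(ioc, fwsave);
 */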
1837
1838u32
1839bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
1840{
1841        return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1842}
1843
1844u32
1845bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
1846{
1847        return PSS_SMEM_PGOFF(fmaddr);
1848}
1849
1850/**
1851 * Register mailbox message handler functions
1852 *
1853 * @param[in]   ioc             IOC instance
1854 * @param[in]   mcfuncs         message class handler functions
1855 */
1856void
1857bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
1858{
1859        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1860        int             mc;
1861
1862        for (mc = 0; mc < BFI_MC_MAX; mc++)
1863                mod->mbhdlr[mc].cbfn = mcfuncs[mc];
1864}
1865
1866/**
1867 * Register mailbox message handler function, to be called by common modules
1868 */
1869void
1870bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
1871                    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1872{
1873        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1874
1875        mod->mbhdlr[mc].cbfn = cbfn;
1876        mod->mbhdlr[mc].cbarg = cbarg;
1877}
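
/*
 * Illustrative registration sketch (editor's sketch, not part of the
 * driver): a common module installs a per-class handler that
 * bfa_ioc_mbox_isr() will dispatch to. Handler and cbarg names are
 * hypothetical.
 *
 *        static void my_ll_isr(void *cbarg, struct bfi_mbmsg_s *m)
 *        {
 *                ... decode m->mh.msg_id and handle the reply ...
 *        }
 *
 *        bfa_ioc_mbox_regisr(ioc, BFI_MC_LL, my_ll_isr, my_module);
 */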
1878
/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * It is the caller's responsibility to serialize requests.
 *
 * @param[in]   ioc     IOC instance
 * @param[in]   cmd     Mailbox command
 */
1886void
1887bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
1888{
1889        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1890        u32        stat;
1891
1892        /**
1893         * If a previous command is pending, queue new command
1894         */
1895        if (!list_empty(&mod->cmd_q)) {
1896                list_add_tail(&cmd->qe, &mod->cmd_q);
1897                return;
1898        }
1899
1900        /**
1901         * If mailbox is busy, queue command for poll timer
1902         */
1903        stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
1904        if (stat) {
1905                list_add_tail(&cmd->qe, &mod->cmd_q);
1906                return;
1907        }
1908
        /**
         * mailbox is free -- write the command directly to firmware
         */
1912        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1913}
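
/*
 * Usage note (editor's sketch): a busy mailbox links the command onto
 * cmd_q by reference, so the caller-owned bfa_mbox_cmd_s must remain
 * valid until it is actually sent. Names are hypothetical.
 *
 *        struct bfa_mbox_cmd_s *cmd = &my_module->mbcmd;
 *
 *        ... build the BFI request in cmd->msg ...
 *        bfa_ioc_mbox_queue(ioc, cmd);
 */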
1914
1915/**
1916 * Handle mailbox interrupts
1917 */
1918void
1919bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
1920{
1921        struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1922        struct bfi_mbmsg_s m;
1923        int             mc;
1924
1925        bfa_ioc_msgget(ioc, &m);
1926
1927        /**
1928         * Treat IOC message class as special.
1929         */
1930        mc = m.mh.msg_class;
1931        if (mc == BFI_MC_IOC) {
1932                bfa_ioc_isr(ioc, &m);
1933                return;
1934        }
1935
        if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
1937                return;
1938
1939        mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
1940}
1941
1942void
1943bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
1944{
1945        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
1946}
1947
1948#ifndef BFA_BIOS_BUILD
1949
1950/**
1951 * return true if IOC is disabled
1952 */
1953bfa_boolean_t
1954bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
1955{
1956        return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling)
1957                || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled));
1958}
1959
/**
 * return true if the IOC firmware is different from the driver's
 * firmware image
 */
1963bfa_boolean_t
1964bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
1965{
1966        return (bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset)
1967                || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck)
1968                || bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch));
1969}
1970
1971#define bfa_ioc_state_disabled(__sm)            \
1972        (((__sm) == BFI_IOC_UNINIT) ||          \
1973         ((__sm) == BFI_IOC_INITING) ||         \
1974         ((__sm) == BFI_IOC_HWINIT) ||          \
1975         ((__sm) == BFI_IOC_DISABLED) ||        \
1976         ((__sm) == BFI_IOC_HBFAIL) ||          \
1977         ((__sm) == BFI_IOC_CFG_DISABLED))
1978
1979/**
1980 * Check if adapter is disabled -- both IOCs should be in a disabled
1981 * state.
1982 */
1983bfa_boolean_t
1984bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
1985{
1986        u32        ioc_state;
1987        bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
1988
1989        if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
1990                return BFA_FALSE;
1991
1992        ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
1993        if (!bfa_ioc_state_disabled(ioc_state))
1994                return BFA_FALSE;
1995
1996        ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
1997        if (!bfa_ioc_state_disabled(ioc_state))
1998                return BFA_FALSE;
1999
2000        return BFA_TRUE;
2001}
2002
/**
 * Add to the IOC heartbeat failure notification queue. To be used by
 * common modules that need to be notified of heartbeat failures.
 */
2007void
2008bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
2009                        struct bfa_ioc_hbfail_notify_s *notify)
2010{
2011        list_add_tail(&notify->qe, &ioc->hb_notify_q);
2012}
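
/*
 * Illustrative sketch (editor's sketch, not part of the driver): the
 * notify structure is queued by reference and must outlive the IOC.
 * Initialize its callback fields per the bfa_ioc_hbfail_notify_s
 * definition in bfa_ioc.h; names here are hypothetical.
 *
 *        static struct bfa_ioc_hbfail_notify_s hb_notify;
 *
 *        ... set hb_notify's callback fields ...
 *        bfa_ioc_hbfail_register(ioc, &hb_notify);
 */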
2013
2014#define BFA_MFG_NAME "Brocade"
2015void
2016bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2017                         struct bfa_adapter_attr_s *ad_attr)
2018{
2019        struct bfi_ioc_attr_s *ioc_attr;
2020        char            model[BFA_ADAPTER_MODEL_NAME_LEN];
2021
2022        ioc_attr = ioc->attr;
2023        bfa_os_memcpy((void *)&ad_attr->serial_num,
2024                      (void *)ioc_attr->brcd_serialnum,
2025                      BFA_ADAPTER_SERIAL_NUM_LEN);
2026
2027        bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN);
2028        bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version,
2029                      BFA_VERSION_LEN);
2030        bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME,
2031                      BFA_ADAPTER_MFG_NAME_LEN);
2032        bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2033                      sizeof(struct bfa_mfg_vpd_s));
2034
2035        ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop);
2036        ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
2037
2038        /**
2039         * model name
2040         */
2041        if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) {
2042                strcpy(model, "BR-10?0");
2043                model[5] = '0' + ad_attr->nports;
2044        } else {
2045                strcpy(model, "Brocade-??5");
2046                model[8] =
2047                        '0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
2048                model[9] = '0' + ad_attr->nports;
2049        }
2050
2051        if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2052                ad_attr->prototype = 1;
2053        else
2054                ad_attr->prototype = 0;
2055
2056        bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN);
2057        bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model,
2058                      BFA_ADAPTER_MODEL_NAME_LEN);
2059
2060        ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
2061        ad_attr->mac = bfa_ioc_get_mac(ioc);
2062
2063        ad_attr->pcie_gen = ioc_attr->pcie_gen;
2064        ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2065        ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2066        ad_attr->asic_rev = ioc_attr->asic_rev;
2067        ad_attr->hw_ver[0] = 'R';
2068        ad_attr->hw_ver[1] = 'e';
2069        ad_attr->hw_ver[2] = 'v';
2070        ad_attr->hw_ver[3] = '-';
2071        ad_attr->hw_ver[4] = ioc_attr->asic_rev;
2072        ad_attr->hw_ver[5] = '\0';
2073
2074        ad_attr->cna_capable = ioc->cna;
2075}
2076
2077void
2078bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2079{
2080        bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2081
2082        ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2083        ioc_attr->port_id = ioc->port_id;
2084
2085        if (!ioc->ctdev)
2086                ioc_attr->ioc_type = BFA_IOC_TYPE_FC;
2087        else if (ioc->ioc_mc == BFI_MC_IOCFC)
2088                ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE;
2089        else if (ioc->ioc_mc == BFI_MC_LL)
2090                ioc_attr->ioc_type = BFA_IOC_TYPE_LL;
2091
2092        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2093
2094        ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2095        ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2096        ioc_attr->pci_attr.chip_rev[0] = 'R';
2097        ioc_attr->pci_attr.chip_rev[1] = 'e';
2098        ioc_attr->pci_attr.chip_rev[2] = 'v';
2099        ioc_attr->pci_attr.chip_rev[3] = '-';
2100        ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev;
2101        ioc_attr->pci_attr.chip_rev[5] = '\0';
2102}
2103
2104/**
2105 *  hal_wwn_public
2106 */
2107wwn_t
2108bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
2109{
        union {
                wwn_t           wwn;
                u8              byte[sizeof(wwn_t)];
        } w;
2115
2116        w.wwn = ioc->attr->mfg_wwn;
2117
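        /*
         * The second port (port 1) derives its PWWN from the mfg WWN
         * by incrementing the last byte.
         */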
2118        if (bfa_ioc_portid(ioc) == 1)
2119                w.byte[7]++;
2120
2121        return w.wwn;
2122}
2123
2124wwn_t
2125bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
2126{
        union {
                wwn_t           wwn;
                u8              byte[sizeof(wwn_t)];
        } w;
2132
2133        w.wwn = ioc->attr->mfg_wwn;
2134
2135        if (bfa_ioc_portid(ioc) == 1)
2136                w.byte[7]++;
2137
2138        w.byte[0] = 0x20;
2139
2140        return w.wwn;
2141}
2142
2143wwn_t
2144bfa_ioc_get_wwn_naa5(struct bfa_ioc_s *ioc, u16 inst)
2145{
        union {
                wwn_t           wwn;
                u8              byte[sizeof(wwn_t)];
        } w, w5;
2151
2152        bfa_trc(ioc, inst);
2153
2154        w.wwn = ioc->attr->mfg_wwn;
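        /*
         * Build an NAA-5 WWN: a 4-bit NAA identifier (0x5), then bytes
         * 2..7 of the mfg WWN shifted up one nibble, with the low 12
         * bits carrying the instance number.
         */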
2155        w5.byte[0] = 0x50 | w.byte[2] >> 4;
2156        w5.byte[1] = w.byte[2] << 4 | w.byte[3] >> 4;
2157        w5.byte[2] = w.byte[3] << 4 | w.byte[4] >> 4;
2158        w5.byte[3] = w.byte[4] << 4 | w.byte[5] >> 4;
2159        w5.byte[4] = w.byte[5] << 4 | w.byte[6] >> 4;
2160        w5.byte[5] = w.byte[6] << 4 | w.byte[7] >> 4;
2161        w5.byte[6] = w.byte[7] << 4 | (inst & 0x0f00) >> 8;
2162        w5.byte[7] = (inst & 0xff);
2163
2164        return w5.wwn;
2165}
2166
2167u64
2168bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
2169{
2170        return ioc->attr->mfg_wwn;
2171}
2172
2173mac_t
2174bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2175{
2176        mac_t           mac;
2177
2178        mac = ioc->attr->mfg_mac;
2179        mac.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2180
2181        return mac;
2182}
2183
2184void
2185bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
2186{
2187        ioc->fcmode = BFA_TRUE;
2188        ioc->port_id = bfa_ioc_pcifn(ioc);
2189}
2190
2191bfa_boolean_t
2192bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
2193{
2194        return ioc->fcmode || (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT);
2195}
2196
2197/**
2198 * Return true if interrupt should be claimed.
2199 */
2200bfa_boolean_t
2201bfa_ioc_intx_claim(struct bfa_ioc_s *ioc)
2202{
2203        u32        isr, msk;
2204
2205        /**
2206         * Always claim if not catapult.
2207         */
2208        if (!ioc->ctdev)
2209                return BFA_TRUE;
2210
2211        /**
2212         * FALSE if next device is claiming interrupt.
2213         * TRUE if next device is not interrupting or not present.
2214         */
2215        msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next);
2216        isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next);
2217        return !(isr & ~msk);
2218}
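
/*
 * Illustrative use from an INTx interrupt handler (editor's sketch;
 * handler shape is hypothetical): bail out early when the shared line
 * belongs to the next function.
 *
 *        if (!bfa_ioc_intx_claim(ioc))
 *                return IRQ_NONE;
 */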
2219
2220/**
2221 * Send AEN notification
2222 */
2223static void
2224bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2225{
2226        union bfa_aen_data_u aen_data;
2227        struct bfa_log_mod_s *logmod = ioc->logm;
2228        s32         inst_num = 0;
2229        struct bfa_ioc_attr_s ioc_attr;
2230
2231        switch (event) {
2232        case BFA_IOC_AEN_HBGOOD:
2233                bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num);
2234                break;
2235        case BFA_IOC_AEN_HBFAIL:
2236                bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num);
2237                break;
2238        case BFA_IOC_AEN_ENABLE:
2239                bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num);
2240                break;
2241        case BFA_IOC_AEN_DISABLE:
2242                bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num);
2243                break;
2244        case BFA_IOC_AEN_FWMISMATCH:
2245                bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num);
2246                break;
2247        default:
2248                break;
2249        }
2250
2251        memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
2252        memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
2253        bfa_ioc_get_attr(ioc, &ioc_attr);
2254        switch (ioc_attr.ioc_type) {
2255        case BFA_IOC_TYPE_FC:
2256                aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
2257                break;
2258        case BFA_IOC_TYPE_FCoE:
2259                aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
2260                aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2261                break;
2262        case BFA_IOC_TYPE_LL:
2263                aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2264                break;
2265        default:
2266                bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC);
2267                break;
2268        }
        aen_data.ioc.ioc_type = ioc_attr.ioc_type;

        /*
         * Post the event, with the data assembled above, to the AEN
         * module.
         */
        bfa_aen_post(ioc->aen, BFA_AEN_CAT_IOC, event, &aen_data);
}
2271
2272/**
2273 * Retrieve saved firmware trace from a prior IOC failure.
2274 */
2275bfa_status_t
2276bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2277{
2278        int             tlen;
2279
2280        if (ioc->dbg_fwsave_len == 0)
2281                return BFA_STATUS_ENOFSAVE;
2282
2283        tlen = *trclen;
2284        if (tlen > ioc->dbg_fwsave_len)
2285                tlen = ioc->dbg_fwsave_len;
2286
2287        bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
2288        *trclen = tlen;
2289        return BFA_STATUS_OK;
2290}
2291
2292/**
2293 * Retrieve saved firmware trace from a prior IOC failure.
2294 */
2295bfa_status_t
2296bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2297{
2298        u32        pgnum;
2299        u32        loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2300        int             i, tlen;
2301        u32       *tbuf = trcdata, r32;
2302
2303        bfa_trc(ioc, *trclen);
2304
2305        pgnum = bfa_ioc_smem_pgnum(ioc, loff);
2306        loff = bfa_ioc_smem_pgoff(ioc, loff);
2307        bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
2308
2309        tlen = *trclen;
2310        if (tlen > BFA_DBG_FWTRC_LEN)
2311                tlen = BFA_DBG_FWTRC_LEN;
2312        tlen /= sizeof(u32);
2313
2314        bfa_trc(ioc, tlen);
2315
2316        for (i = 0; i < tlen; i++) {
2317                r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2318                tbuf[i] = bfa_os_ntohl(r32);
2319                loff += sizeof(u32);
2320
2321                /**
2322                 * handle page offset wrap around
2323                 */
2324                loff = PSS_SMEM_PGOFF(loff);
2325                if (loff == 0) {
2326                        pgnum++;
2327                        bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
2328                }
2329        }
2330        bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
2331                      bfa_ioc_smem_pgnum(ioc, 0));
2332        bfa_trc(ioc, pgnum);
2333
2334        *trclen = tlen * sizeof(u32);
2335        return BFA_STATUS_OK;
2336}
2337
2338/**
2339 * Save firmware trace if configured.
2340 */
2341static void
2342bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
2343{
2344        int             tlen;
2345
2346        if (ioc->dbg_fwsave_len) {
2347                tlen = ioc->dbg_fwsave_len;
2348                bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2349        }
2350}
2351
2352/**
2353 * Firmware failure detected. Start recovery actions.
2354 */
2355static void
2356bfa_ioc_recover(struct bfa_ioc_s *ioc)
2357{
2358        if (ioc->dbg_fwsave_once) {
2359                ioc->dbg_fwsave_once = BFA_FALSE;
2360                bfa_ioc_debug_save(ioc);
2361        }
2362
2363        bfa_ioc_stats(ioc, ioc_hbfails);
2364        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2365}
2366
2367#else
2368
2369static void
2370bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2371{
2372}
2373
2374static void
2375bfa_ioc_recover(struct bfa_ioc_s *ioc)
2376{
2377        bfa_assert(0);
2378}
2379
2380#endif
2381