linux/drivers/scsi/bfa/bfa_core.c
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_reg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
        &hal_mod_fcdiag,
        &hal_mod_sgpg,
        &hal_mod_fcport,
        &hal_mod_fcxp,
        &hal_mod_lps,
        &hal_mod_uf,
        &hal_mod_rport,
        &hal_mod_fcp,
        &hal_mod_dconf,
        NULL
};

/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
        bfa_isr_unhandled,      /* NONE */
        bfa_isr_unhandled,      /* BFI_MC_IOC */
        bfa_fcdiag_intr,        /* BFI_MC_DIAG */
        bfa_isr_unhandled,      /* BFI_MC_FLASH */
        bfa_isr_unhandled,      /* BFI_MC_CEE */
        bfa_fcport_isr,         /* BFI_MC_FCPORT */
        bfa_isr_unhandled,      /* BFI_MC_IOCFC */
        bfa_isr_unhandled,      /* BFI_MC_LL */
        bfa_uf_isr,             /* BFI_MC_UF */
        bfa_fcxp_isr,           /* BFI_MC_FCXP */
        bfa_lps_isr,            /* BFI_MC_LPS */
        bfa_rport_isr,          /* BFI_MC_RPORT */
        bfa_itn_isr,            /* BFI_MC_ITN */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_READ */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_WRITE */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_IO */
        bfa_ioim_isr,           /* BFI_MC_IOIM */
        bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */
        bfa_tskim_isr,          /* BFI_MC_TSKIM */
        bfa_isr_unhandled,      /* BFI_MC_SBOOT */
        bfa_isr_unhandled,      /* BFI_MC_IPFC */
        bfa_isr_unhandled,      /* BFI_MC_PORT */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
};

/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
        NULL,
        NULL,           /* BFI_MC_IOC   */
        NULL,           /* BFI_MC_DIAG  */
        NULL,           /* BFI_MC_FLASH */
        NULL,           /* BFI_MC_CEE   */
        NULL,           /* BFI_MC_PORT  */
        bfa_iocfc_isr,  /* BFI_MC_IOCFC */
        NULL,
};

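/*
 * port module attach
 */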
static void
bfa_com_port_attach(struct bfa_s *bfa)
{
        struct bfa_port_s       *port = &bfa->modules.port;
        struct bfa_mem_dma_s    *port_dma = BFA_MEM_PORT_DMA(bfa);

        bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
        bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
}

/*
 * ablk module attach
 */
static void
bfa_com_ablk_attach(struct bfa_s *bfa)
{
        struct bfa_ablk_s       *ablk = &bfa->modules.ablk;
        struct bfa_mem_dma_s    *ablk_dma = BFA_MEM_ABLK_DMA(bfa);

        bfa_ablk_attach(ablk, &bfa->ioc);
        bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
}

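/*
 * cee module attach
 */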
static void
bfa_com_cee_attach(struct bfa_s *bfa)
{
        struct bfa_cee_s        *cee = &bfa->modules.cee;
        struct bfa_mem_dma_s    *cee_dma = BFA_MEM_CEE_DMA(bfa);

        cee->trcmod = bfa->trcmod;
        bfa_cee_attach(cee, &bfa->ioc, bfa);
        bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
}

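/*
 * sfp module attach
 */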
static void
bfa_com_sfp_attach(struct bfa_s *bfa)
{
        struct bfa_sfp_s        *sfp = BFA_SFP_MOD(bfa);
        struct bfa_mem_dma_s    *sfp_dma = BFA_MEM_SFP_DMA(bfa);

        bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
        bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
}

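/*
 * flash module attach
 */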
static void
bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
        struct bfa_flash_s      *flash = BFA_FLASH(bfa);
        struct bfa_mem_dma_s    *flash_dma = BFA_MEM_FLASH_DMA(bfa);

        bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
        bfa_flash_memclaim(flash, flash_dma->kva_curp,
                           flash_dma->dma_curp, mincfg);
}

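/*
 * diag module attach
 */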
static void
bfa_com_diag_attach(struct bfa_s *bfa)
{
        struct bfa_diag_s       *diag = BFA_DIAG_MOD(bfa);
        struct bfa_mem_dma_s    *diag_dma = BFA_MEM_DIAG_DMA(bfa);

        bfa_diag_attach(diag, &bfa->ioc, bfa, bfa_fcport_beacon, bfa->trcmod);
        bfa_diag_memclaim(diag, diag_dma->kva_curp, diag_dma->dma_curp);
}

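/*
 * phy module attach
 */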
static void
bfa_com_phy_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
{
        struct bfa_phy_s        *phy = BFA_PHY(bfa);
        struct bfa_mem_dma_s    *phy_dma = BFA_MEM_PHY_DMA(bfa);

        bfa_phy_attach(phy, &bfa->ioc, bfa, bfa->trcmod, mincfg);
        bfa_phy_memclaim(phy, phy_dma->kva_curp, phy_dma->dma_curp, mincfg);
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV           5000    /* msecs */

enum {
        BFA_IOCFC_ACT_NONE      = 0,
        BFA_IOCFC_ACT_INIT      = 1,
        BFA_IOCFC_ACT_STOP      = 2,
        BFA_IOCFC_ACT_DISABLE   = 3,
        BFA_IOCFC_ACT_ENABLE    = 4,
};

#define DEF_CFG_NUM_FABRICS             1
#define DEF_CFG_NUM_LPORTS              256
#define DEF_CFG_NUM_CQS                 4
#define DEF_CFG_NUM_IOIM_REQS           (BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS          128
#define DEF_CFG_NUM_FCXP_REQS           64
#define DEF_CFG_NUM_UF_BUFS             64
#define DEF_CFG_NUM_RPORTS              1024
#define DEF_CFG_NUM_ITNIMS              (DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS                256

#define DEF_CFG_NUM_SGPGS               2048
#define DEF_CFG_NUM_REQQ_ELEMS          256
#define DEF_CFG_NUM_RSPQ_ELEMS          64
#define DEF_CFG_NUM_SBOOT_TGTS          16
#define DEF_CFG_NUM_SBOOT_LUNS          16

/*
 * IOCFC state machine definitions/declarations
 */
bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
                   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
                   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, operational,
                   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, dconf_write,
                   struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
bfa_fsm_state_decl(bfa_iocfc, init_failed,
                   struct bfa_iocfc_s, enum iocfc_event);

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_start_submod(struct bfa_s *bfa);
static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
static void bfa_iocfc_send_cfg(void *bfa_arg);
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);

static void
bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
{
}

static void
bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_INIT:
        case IOCFC_E_ENABLE:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_ioc_enable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_IOC_ENABLED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
                break;
        case IOCFC_E_IOC_FAILED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_dconf_modinit(iocfc->bfa);
}

static void
bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_DCONF_DONE:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
                break;
        case IOCFC_E_IOC_FAILED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_iocfc_send_cfg(iocfc->bfa);
}

static void
bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_CFG_DONE:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
                break;
        case IOCFC_E_IOC_FAILED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
{
        iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
        bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
                     bfa_iocfc_init_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_START:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
                break;
        case IOCFC_E_STOP:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
                break;
        case IOCFC_E_DISABLE:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
                break;
        case IOCFC_E_IOC_FAILED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_fcport_init(iocfc->bfa);
        bfa_iocfc_start_submod(iocfc->bfa);
}

static void
bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_STOP:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
                break;
        case IOCFC_E_DISABLE:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
                break;
        case IOCFC_E_IOC_FAILED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_dconf_modexit(iocfc->bfa);
}

static void
bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_DCONF_DONE:
        case IOCFC_E_IOC_FAILED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_ioc_disable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_IOC_DISABLED:
                bfa_isr_disable(iocfc->bfa);
                bfa_iocfc_disable_submod(iocfc->bfa);
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
                iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
                bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
                             bfa_iocfc_stop_cb, iocfc->bfa);
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_ioc_enable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_IOC_ENABLED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
                break;
        case IOCFC_E_IOC_FAILED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);

                if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
                        break;

                iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
                bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
                             bfa_iocfc_enable_cb, iocfc->bfa);
                iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_iocfc_send_cfg(iocfc->bfa);
}

static void
bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_CFG_DONE:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
                if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
                        break;

                iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
                bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
                             bfa_iocfc_enable_cb, iocfc->bfa);
                iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
                break;
        case IOCFC_E_IOC_FAILED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
                if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
                        break;

                iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
                bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
                             bfa_iocfc_enable_cb, iocfc->bfa);
                iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_ioc_disable(&iocfc->bfa->ioc);
}

static void
bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_IOC_DISABLED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_isr_disable(iocfc->bfa);
        bfa_iocfc_disable_submod(iocfc->bfa);
        iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
        bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
                     bfa_iocfc_disable_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_STOP:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
                break;
        case IOCFC_E_ENABLE:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_isr_disable(iocfc->bfa);
        bfa_iocfc_disable_submod(iocfc->bfa);
}

static void
bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_STOP:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
                break;
        case IOCFC_E_DISABLE:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
                break;
        case IOCFC_E_IOC_ENABLED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
                break;
        case IOCFC_E_IOC_FAILED:
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

static void
bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
{
        bfa_isr_disable(iocfc->bfa);
        iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
        bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
                     bfa_iocfc_init_cb, iocfc->bfa);
}

static void
bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
{
        bfa_trc(iocfc->bfa, event);

        switch (event) {
        case IOCFC_E_STOP:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
                break;
        case IOCFC_E_DISABLE:
                bfa_ioc_disable(&iocfc->bfa->ioc);
                break;
        case IOCFC_E_IOC_ENABLED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
                break;
        case IOCFC_E_IOC_DISABLED:
                bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
                iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
                bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
                             bfa_iocfc_disable_cb, iocfc->bfa);
                break;
        case IOCFC_E_IOC_FAILED:
                break;
        default:
                bfa_sm_fault(iocfc->bfa, event);
                break;
        }
}

/*
 * BFA Interrupt handling functions
 */
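/*
 * Resume pending requests waiting for free space in a request queue.
 */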
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
        struct list_head *waitq, *qe, *qen;
        struct bfa_reqq_wait_s *wqe;

        waitq = bfa_reqq(bfa, qid);
        list_for_each_safe(qe, qen, waitq) {
                /*
                 * Callback only as long as there is room in request queue
                 */
                if (bfa_reqq_full(bfa, qid))
                        break;

                list_del(qe);
                wqe = (struct bfa_reqq_wait_s *) qe;
                wqe->qresume(wqe->cbarg);
        }
}

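/*
 * Process pending response queue elements and dispatch them to the
 * registered message handlers.
 */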
bfa_boolean_t
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
        struct bfi_msg_s *m;
        u32     pi, ci;
        struct list_head *waitq;
        bfa_boolean_t ret;

        ci = bfa_rspq_ci(bfa, qid);
        pi = bfa_rspq_pi(bfa, qid);

        ret = (ci != pi);

        while (ci != pi) {
                m = bfa_rspq_elem(bfa, qid, ci);
                WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

                bfa_isrs[m->mhdr.msg_class] (bfa, m);
                CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
        }

        /*
         * acknowledge RME completions and update CI
         */
        bfa_isr_rspq_ack(bfa, qid, ci);

        /*
         * Resume any pending requests in the corresponding reqq.
         */
        waitq = bfa_reqq(bfa, qid);
        if (!list_empty(waitq))
                bfa_reqq_resume(bfa, qid);

        return ret;
}

static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
        struct list_head *waitq;

        bfa_isr_reqq_ack(bfa, qid);

        /*
         * Resume any pending requests in the corresponding reqq.
         */
        waitq = bfa_reqq(bfa, qid);
        if (!list_empty(waitq))
                bfa_reqq_resume(bfa, qid);
}

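/*
 * Process all pending queue and error interrupts from one vector.
 */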
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
        u32     intr, qintr;
        int     queue;

        intr = readl(bfa->iocfc.bfa_regs.intr_status);
        if (!intr)
                return;

        /*
         * RME completion queue interrupt
         */
        qintr = intr & __HFN_INT_RME_MASK;
        if (qintr && bfa->queue_process) {
                for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
                        bfa_isr_rspq(bfa, queue);
        }

        intr &= ~qintr;
        if (!intr)
                return;

        /*
         * CPE completion queue interrupt
         */
        qintr = intr & __HFN_INT_CPE_MASK;
        if (qintr && bfa->queue_process) {
                for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
                        bfa_isr_reqq(bfa, queue);
        }
        intr &= ~qintr;
        if (!intr)
                return;

        bfa_msix_lpu_err(bfa, intr);
}

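/*
 * INTx interrupt handler. Returns BFA_TRUE if any queue or error
 * interrupt work was found.
 */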
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
        u32 intr, qintr;
        int queue;
        bfa_boolean_t rspq_comp = BFA_FALSE;

        intr = readl(bfa->iocfc.bfa_regs.intr_status);

        qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
        if (qintr)
                writel(qintr, bfa->iocfc.bfa_regs.intr_status);

        /*
         * Unconditional RME completion queue interrupt
         */
        if (bfa->queue_process) {
                for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
                        if (bfa_isr_rspq(bfa, queue))
                                rspq_comp = BFA_TRUE;
        }

        if (!intr)
                return (qintr | rspq_comp) ? BFA_TRUE : BFA_FALSE;

        /*
         * CPE completion queue interrupt
         */
        qintr = intr & __HFN_INT_CPE_MASK;
        if (qintr && bfa->queue_process) {
                for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
                        bfa_isr_reqq(bfa, queue);
        }
        intr &= ~qintr;
        if (!intr)
                return BFA_TRUE;

        bfa_msix_lpu_err(bfa, intr);

        return BFA_TRUE;
}

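/*
 * Install the control MSIX handler and unmask the error and function
 * interrupt sources for this PCI function.
 */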
void
bfa_isr_enable(struct bfa_s *bfa)
{
        u32 umsk;
        int pci_func = bfa_ioc_pcifn(&bfa->ioc);

        bfa_trc(bfa, pci_func);

        bfa_msix_ctrl_install(bfa);

        if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
                umsk = __HFN_INT_ERR_MASK_CT2;
                umsk |= pci_func == 0 ?
                        __HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
        } else {
                umsk = __HFN_INT_ERR_MASK;
                umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
        }

        writel(umsk, bfa->iocfc.bfa_regs.intr_status);
        writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
        bfa->iocfc.intr_mask = ~umsk;
        bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
        bfa_isr_mode_set(bfa, BFA_FALSE);
        writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
        bfa_msix_uninstall(bfa);
}

void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
        bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}

void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        bfa_trc(bfa, m->mhdr.msg_class);
        bfa_trc(bfa, m->mhdr.msg_id);
        bfa_trc(bfa, m->mhdr.mtag.i2htok);
        WARN_ON(1);
        bfa_trc_stop(bfa->trcmod);
}

void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
        bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}

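/*
 * Handle mailbox (LPU) and error interrupts.
 */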
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
        u32 intr, curr_value;
        bfa_boolean_t lpu_isr, halt_isr, pss_isr;

        intr = readl(bfa->iocfc.bfa_regs.intr_status);

        if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
                halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
                pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
                lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
                                   __HFN_INT_MBOX_LPU1_CT2);
                intr    &= __HFN_INT_ERR_MASK_CT2;
        } else {
                halt_isr = bfa_asic_id_ct(bfa->ioc.pcidev.device_id) ?
                                          (intr & __HFN_INT_LL_HALT) : 0;
                pss_isr  = intr & __HFN_INT_ERR_PSS;
                lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
                intr    &= __HFN_INT_ERR_MASK;
        }

        if (lpu_isr)
                bfa_ioc_mbox_isr(&bfa->ioc);

        if (intr) {
                if (halt_isr) {
                        /*
                         * If the LL_HALT bit is set, the FW Init Halt LL Port
                         * register needs to be cleared as well so that the
                         * Interrupt Status register will clear.
                         */
                        curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
                        curr_value &= ~__FW_INIT_HALT_P;
                        writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
                }

                if (pss_isr) {
                        /*
                         * The ERR_PSS bit needs to be cleared as well: with
                         * shared interrupts the driver's handler may still be
                         * called even though the source is already masked out.
                         */
                        curr_value = readl(
                                        bfa->ioc.ioc_regs.pss_err_status_reg);
                        writel(curr_value,
                                bfa->ioc.ioc_regs.pss_err_status_reg);
                }

                writel(intr, bfa->iocfc.bfa_regs.intr_status);
                bfa_ioc_error_isr(&bfa->ioc);
        }
}

/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfg_req_s cfg_req;
        struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
        struct bfa_iocfc_cfg_s  *cfg = &iocfc->cfg;
        int             i;

        WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
        bfa_trc(bfa, cfg->fwcfg.num_cqs);

        bfa_iocfc_reset_queues(bfa);

        /*
         * initialize IOC configuration info
         */
        cfg_info->single_msix_vec = 0;
        if (bfa->msix.nvecs == 1)
                cfg_info->single_msix_vec = 1;
        cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
        cfg_info->num_cqs = cfg->fwcfg.num_cqs;
        cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
        cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

        bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
        /*
         * dma map REQ and RSP circular queues and shadow pointers
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
                                    iocfc->req_cq_ba[i].pa);
                bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
                                    iocfc->req_cq_shadow_ci[i].pa);
                cfg_info->req_cq_elems[i] =
                        cpu_to_be16(cfg->drvcfg.num_reqq_elems);

                bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
                                    iocfc->rsp_cq_ba[i].pa);
                bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
                                    iocfc->rsp_cq_shadow_pi[i].pa);
                cfg_info->rsp_cq_elems[i] =
                        cpu_to_be16(cfg->drvcfg.num_rspq_elems);
        }

        /*
         * Enable interrupt coalescing on the driver init path, but not
         * on the ioc disable/enable path.
         */
        if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
                cfg_info->intr_attr.coalesce = BFA_TRUE;

        /*
         * dma map IOC configuration itself
         */
        bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
                    bfa_fn_lpu(bfa));
        bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

        bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
                          sizeof(struct bfi_iocfc_cfg_req_s));
}

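/*
 * Initialize IOCFC state and set up the ASIC-specific hardware
 * interface function vectors.
 */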
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                   struct bfa_pcidev_s *pcidev)
{
        struct bfa_iocfc_s      *iocfc = &bfa->iocfc;

        bfa->bfad = bfad;
        iocfc->bfa = bfa;
        iocfc->cfg = *cfg;

        /*
         * Initialize chip specific handlers.
         */
        if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
                iocfc->hwif.hw_reginit = bfa_hwct_reginit;
                iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
                iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
                iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
                iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
                iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
                iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
                iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
        } else {
                iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
                iocfc->hwif.hw_reqq_ack = NULL;
                iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
                iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
                iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
                iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
                iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
                        bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
                iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
                        bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
        }

        if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
                iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
                iocfc->hwif.hw_isr_mode_set = NULL;
                iocfc->hwif.hw_rspq_ack = bfa_hwct2_rspq_ack;
        }

        iocfc->hwif.hw_reginit(bfa);
        bfa->msix.nvecs = 0;
}

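/*
 * Claim DMA-able and KVA memory for the IOC, the request/response
 * queues, their shadow pointers, and the config info/response pages.
 */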
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
        u8      *dm_kva = NULL;
        u64     dm_pa = 0;
        int     i, per_reqq_sz, per_rspq_sz, dbgsz;
        struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
        struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
        struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
        struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

        /* First allocate dma memory for IOC */
        bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
                        bfa_mem_dma_phys(ioc_dma));

        /* Claim DMA-able memory for the request/response queues */
        per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
                                BFA_DMA_ALIGN_SZ);
        per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
                                BFA_DMA_ALIGN_SZ);

        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
                iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
                iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
                memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

                rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
                iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
                iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
                memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
        }

        /* Claim IOCFC dma memory - for shadow CI/PI */
        dm_kva = bfa_mem_dma_virt(iocfc_dma);
        dm_pa  = bfa_mem_dma_phys(iocfc_dma);

        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                iocfc->req_cq_shadow_ci[i].kva = dm_kva;
                iocfc->req_cq_shadow_ci[i].pa = dm_pa;
                dm_kva += BFA_CACHELINE_SZ;
                dm_pa += BFA_CACHELINE_SZ;

                iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
                iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
                dm_kva += BFA_CACHELINE_SZ;
                dm_pa += BFA_CACHELINE_SZ;
        }

        /* Claim IOCFC dma memory - for the config info page */
        bfa->iocfc.cfg_info.kva = dm_kva;
        bfa->iocfc.cfg_info.pa = dm_pa;
        bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

        /* Claim IOCFC dma memory - for the config response */
        bfa->iocfc.cfgrsp_dma.kva = dm_kva;
        bfa->iocfc.cfgrsp_dma.pa = dm_pa;
        bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                        BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                        BFA_CACHELINE_SZ);

        /* Claim IOCFC kva memory */
        dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
        if (dbgsz > 0) {
                bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
                bfa_mem_kva_curp(iocfc) += dbgsz;
        }
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
        int             i;

        bfa->queue_process = BFA_TRUE;
        for (i = 0; i < BFI_IOC_MAX_CQS; i++)
                bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i));

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->start(bfa);

        bfa->iocfc.submod_enabled = BFA_TRUE;
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
        int             i;

        if (bfa->iocfc.submod_enabled == BFA_FALSE)
                return;

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->iocdisable(bfa);

        bfa->iocfc.submod_enabled = BFA_FALSE;
}

static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
        struct bfa_s    *bfa = bfa_arg;

        if (complete)
                bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
        struct bfa_s  *bfa = bfa_arg;
        struct bfad_s *bfad = bfa->bfad;

        if (compl)
                complete(&bfad->comp);
}

static void
bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
{
        struct bfa_s    *bfa = bfa_arg;
        struct bfad_s *bfad = bfa->bfad;

        if (compl)
                complete(&bfad->enable_comp);
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
        struct bfa_s  *bfa = bfa_arg;
        struct bfad_s *bfad = bfa->bfad;

        if (compl)
                complete(&bfad->disable_comp);
}

/*
 * Configure queue registers from the firmware response.
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
        int     i;
        struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
        void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

        for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
                bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
                r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
                r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
                r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
                r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
                r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
                r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
        }
}

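/*
 * Re-configure submodule resources to the limits reported by firmware.
 */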
static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
        bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
        bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
        bfa_rport_res_recfg(bfa, fwcfg->num_rports);
        bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
        bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
        struct bfa_iocfc_s              *iocfc   = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s       *cfgrsp  = iocfc->cfgrsp;
        struct bfa_iocfc_fwcfg_s        *fwcfg   = &cfgrsp->fwcfg;

        /* num_cqs is a single byte; no endian conversion is needed */
        fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
        fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
        fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
        fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
        fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
        fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);

        /*
         * configure queue register offsets as learnt from firmware
         */
        bfa_iocfc_qreg(bfa, &cfgrsp->qreg);

        /*
         * Re-configure resources as learnt from Firmware
         */
        bfa_iocfc_res_recfg(bfa, fwcfg);

        /*
         * Install MSIX queue handlers
         */
        bfa_msix_queue_install(bfa);

        if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
                bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
                bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
                bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
        }
}

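/*
 * Reset the consumer/producer indices of all request/response queues.
 */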
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
        int             q;

        for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
                bfa_reqq_ci(bfa, q) = 0;
                bfa_reqq_pi(bfa, q) = 0;
                bfa_rspq_ci(bfa, q) = 0;
                bfa_rspq_pi(bfa, q) = 0;
        }
}

/*
 *      Process FAA pwwn msg from fw.
 */
static void
bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
{
        struct bfa_iocfc_s              *iocfc   = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s       *cfgrsp  = iocfc->cfgrsp;

        cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
        cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;

        bfa->ioc.attr->pwwn = msg->pwwn;
        bfa->ioc.attr->nwwn = msg->nwwn;
        bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
}

/* Fabric Assigned Address specific functions */

/*
 *      Check whether IOC is ready before sending command down
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
        enum bfa_ioc_type_e     ioc_type = bfa_get_type(bfa);
        u32     card_type = bfa->ioc.attr->card_type;

        if (bfa_ioc_is_operational(&bfa->ioc)) {
                if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
                        return BFA_STATUS_FEATURE_NOT_SUPPORTED;
        } else {
                return BFA_STATUS_IOC_NON_OP;
        }

        return BFA_STATUS_OK;
}

bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
                bfa_cb_iocfc_t cbfn, void *cbarg)
{
        struct bfi_faa_query_s  faa_attr_req;
        struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
        bfa_status_t            status;

        iocfc->faa_args.faa_attr = attr;
        iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
        iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

        status = bfa_faa_validate_request(bfa);
        if (status != BFA_STATUS_OK)
                return status;

        if (iocfc->faa_args.busy == BFA_TRUE)
                return BFA_STATUS_DEVBUSY;

        iocfc->faa_args.busy = BFA_TRUE;
        memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
        bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
                BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));

        bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
                sizeof(struct bfi_faa_query_s));

        return BFA_STATUS_OK;
}

/*
 *      FAA query response
 */
static void
bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
                bfi_faa_query_rsp_t *rsp)
{
        void    *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;

        if (iocfc->faa_args.faa_attr) {
                iocfc->faa_args.faa_attr->faa = rsp->faa;
                iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
                iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
        }

        WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);

        iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
        iocfc->faa_args.busy = BFA_FALSE;
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
        struct bfa_s    *bfa = bfa_arg;

        if (status == BFA_STATUS_OK)
                bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
        else
                bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
        struct bfa_s    *bfa = bfa_arg;

        bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
        struct bfa_s    *bfa = bfa_arg;

        bfa->queue_process = BFA_FALSE;
        bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
        struct bfa_s    *bfa = bfa_arg;

        bfa_iocfc_reset_queues(bfa);
        bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
                  struct bfa_s *bfa)
{
        int q, per_reqq_sz, per_rspq_sz;
        struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
        struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
        struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
        u32     dm_len = 0;

        /* dma memory setup for IOC */
        bfa_mem_dma_setup(meminfo, ioc_dma,
                BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

        /* dma memory setup for REQ/RSP queues */
        per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
                                BFA_DMA_ALIGN_SZ);
        per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
                                BFA_DMA_ALIGN_SZ);

        for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
                bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
                                per_reqq_sz);
                bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
                                per_rspq_sz);
        }

        /* IOCFC dma memory - calculate Shadow CI/PI size */
        for (q = 0; q < cfg->fwcfg.num_cqs; q++)
                dm_len += (2 * BFA_CACHELINE_SZ);

        /* IOCFC dma memory - calculate config info / rsp size */
        dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
        dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                        BFA_CACHELINE_SZ);

        /* dma memory setup for IOCFC */
        bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

        /* kva memory setup for IOCFC */
        bfa_mem_kva_setup(meminfo, iocfc_kva,
                        ((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}

/*
 * Attach and initialize the IOCFC module.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                 struct bfa_pcidev_s *pcidev)
{
        int             i;
        struct bfa_ioc_s *ioc = &bfa->ioc;

        bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
        bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
        bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
        bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

        ioc->trcmod = bfa->trcmod;
        bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

        bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
        bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

        bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
        bfa_iocfc_mem_claim(bfa, cfg);
        INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

        INIT_LIST_HEAD(&bfa->comp_q);
        for (i = 0; i < BFI_IOC_MAX_CQS; i++)
                INIT_LIST_HEAD(&bfa->reqq_waitq[i]);

        bfa->iocfc.cb_reqd = BFA_FALSE;
        bfa->iocfc.op_status = BFA_STATUS_OK;
        bfa->iocfc.submod_enabled = BFA_FALSE;

        bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
}

/*
 * Initialize the IOCFC module; kicks off the IOC enable sequence.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
        bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
}

1481/*
1482 * IOC start called from bfa_start(). Called to start IOC operations
1483 * at driver instantiation for this instance.
1484 */
1485void
1486bfa_iocfc_start(struct bfa_s *bfa)
1487{
1488        bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
1489}
1490
1491/*
1492 * IOC stop called from bfa_stop(). Called only when driver is unloaded
1493 * for this instance.
1494 */
1495void
1496bfa_iocfc_stop(struct bfa_s *bfa)
1497{
1498        bfa->queue_process = BFA_FALSE;
1499        bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
1500}
1501
1502void
1503bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
1504{
1505        struct bfa_s            *bfa = bfaarg;
1506        struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
1507        union bfi_iocfc_i2h_msg_u       *msg;
1508
1509        msg = (union bfi_iocfc_i2h_msg_u *) m;
1510        bfa_trc(bfa, msg->mh.msg_id);
1511
1512        switch (msg->mh.msg_id) {
1513        case BFI_IOCFC_I2H_CFG_REPLY:
1514                bfa_iocfc_cfgrsp(bfa);
1515                break;
1516        case BFI_IOCFC_I2H_UPDATEQ_RSP:
1517                iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
1518                break;
1519        case BFI_IOCFC_I2H_ADDR_MSG:
1520                bfa_iocfc_process_faa_addr(bfa,
1521                                (struct bfi_faa_addr_msg_s *)msg);
1522                break;
1523        case BFI_IOCFC_I2H_FAA_QUERY_RSP:
1524                bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
1525                break;
1526        default:
1527                WARN_ON(1);
1528        }
1529}
1530
1531void
1532bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
1533{
1534        struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
1535
1536        attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
1537
1538        attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
1539                                be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
1540                                be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
1541
1542        attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
1543                        be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
1544                        be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
1545
1546        attr->config    = iocfc->cfg;
1547}
1548
1549bfa_status_t
1550bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
1551{
1552        struct bfa_iocfc_s              *iocfc = &bfa->iocfc;
1553        struct bfi_iocfc_set_intr_req_s *m;
1554
1555        iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
1556        iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
1557        iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
1558
1559        if (!bfa_iocfc_is_operational(bfa))
1560                return BFA_STATUS_OK;
1561
1562        m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
1563        if (!m)
1564                return BFA_STATUS_DEVBUSY;
1565
1566        bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
1567                    bfa_fn_lpu(bfa));
1568        m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
1569        m->delay    = iocfc->cfginfo->intr_attr.delay;
1570        m->latency  = iocfc->cfginfo->intr_attr.latency;
1571
1572        bfa_trc(bfa, attr->delay);
1573        bfa_trc(bfa, attr->latency);
1574
1575        bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
1576        return BFA_STATUS_OK;
1577}
1578
1579void
1580bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
1581{
1582        struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
1583
1584        iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
1585        bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
1586}
1587/*
1588 * Enable IOC after it is disabled.
1589 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Enable");
        bfa->iocfc.cb_reqd = BFA_TRUE;
        bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Disable");

        bfa->queue_process = BFA_FALSE;
        bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
        return bfa_ioc_is_operational(&bfa->ioc) &&
                bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
}

/*
 * Return the boot target port WWNs, read from the boot information in
 * flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
        int i;

        /* Pre-boot (PBC) configuration takes precedence when present. */
        if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
                bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
                *nwwns = cfgrsp->pbc_cfg.nbluns;
                for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
                        wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

                return;
        }

        *nwwns = cfgrsp->bootwwns.nwwns;
        memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}

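/*
 * Usage sketch (illustrative): fetching the boot target WWNs.  The array
 * bound MAX_BOOT_WWNS is a hypothetical stand-in; a real caller sizes
 * wwns[] to cover both the PBC boot LUN list and cfgrsp->bootwwns.wwn.
 *
 *      wwn_t wwns[MAX_BOOT_WWNS];
 *      u8 nwwns;
 *      int i;
 *
 *      bfa_iocfc_get_bootwwns(bfa, &nwwns, wwns);
 *      for (i = 0; i < nwwns; i++)
 *              bfa_trc(bfa, wwns[i]);
 */
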
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

        memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
        return cfgrsp->pbc_cfg.nvports;
}


/*
 * Use this function to query the memory requirements of the BFA library.
 * It must be called before bfa_attach() to learn the memory required by
 * the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to the
 * pre-defined values within the BFA library.
 *
 * @param[in] cfg -     pointer to struct bfa_iocfc_cfg_s. The driver layer
 *                      should indicate its configuration in this structure.
 *                      The default values for struct bfa_iocfc_cfg_s can be
 *                      fetched using the bfa_cfg_get_default() API.
 *
 *                      If the cap's boundary check fails, the library uses
 *                      the default bfa_cap_t values (and logs a warning msg).
 *
 * @param[out] meminfo - pointer to struct bfa_meminfo_s. On return it
 *                      indicates the memory type (see bfa_mem_type_t) and
 *                      amount of memory required.
 *
 *                      The driver should allocate the memory, populate the
 *                      starting address of each block, and provide the same
 *                      structure as the input parameter to the bfa_attach()
 *                      call.
 *
 * @param[in] bfa -     pointer to the bfa structure, used while fetching the
 *                      dma, kva memory information of the bfa sub-modules.
 *
 * @return void
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
                struct bfa_s *bfa)
{
        int             i;
        struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
        struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
        struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
        struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
        struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
        struct bfa_mem_dma_s *diag_dma = BFA_MEM_DIAG_DMA(bfa);
        struct bfa_mem_dma_s *phy_dma = BFA_MEM_PHY_DMA(bfa);

        WARN_ON((cfg == NULL) || (meminfo == NULL));

        memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

        /* Initialize the DMA & KVA meminfo queues */
        INIT_LIST_HEAD(&meminfo->dma_info.qe);
        INIT_LIST_HEAD(&meminfo->kva_info.qe);

        bfa_iocfc_meminfo(cfg, meminfo, bfa);

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->meminfo(cfg, meminfo, bfa);

        /* dma info setup */
        bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
        bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
        bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
        bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
        bfa_mem_dma_setup(meminfo, flash_dma,
                          bfa_flash_meminfo(cfg->drvcfg.min_cfg));
        bfa_mem_dma_setup(meminfo, diag_dma, bfa_diag_meminfo());
        bfa_mem_dma_setup(meminfo, phy_dma,
                          bfa_phy_meminfo(cfg->drvcfg.min_cfg));
}

/*
 * Use this function to attach the driver instance to the BFA library.
 * This call does not trigger any hardware initialization; that is done
 * later by the bfa_init() call.
 *
 * This call will fail if the cap is out of range compared to the
 * pre-defined values within the BFA library.
 *
 * @param[out]  bfa     Pointer to the bfa structure.
 * @param[in]   bfad    Opaque handle back to the driver's IOC structure.
 * @param[in]   cfg     Pointer to struct bfa_iocfc_cfg_s. Should be the
 *                      same structure that was used in the
 *                      bfa_cfg_get_meminfo() call.
 * @param[in]   meminfo Pointer to struct bfa_meminfo_s. The driver should
 *                      use the bfa_cfg_get_meminfo() call to find the
 *                      memory blocks required, allocate the required
 *                      memory, and provide the starting addresses.
 * @param[in]   pcidev  Pointer to struct bfa_pcidev_s.
 *
 * @return void
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
               struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
        int     i;
        struct bfa_mem_dma_s *dma_info, *dma_elem;
        struct bfa_mem_kva_s *kva_info, *kva_elem;
        struct list_head *dm_qe, *km_qe;

        bfa->fcs = BFA_FALSE;

        WARN_ON((cfg == NULL) || (meminfo == NULL));

        /* Initialize memory pointers for iterative allocation */
        dma_info = &meminfo->dma_info;
        dma_info->kva_curp = dma_info->kva;
        dma_info->dma_curp = dma_info->dma;

        kva_info = &meminfo->kva_info;
        kva_info->kva_curp = kva_info->kva;

        list_for_each(dm_qe, &dma_info->qe) {
                dma_elem = (struct bfa_mem_dma_s *) dm_qe;
                dma_elem->kva_curp = dma_elem->kva;
                dma_elem->dma_curp = dma_elem->dma;
        }

        list_for_each(km_qe, &kva_info->qe) {
                kva_elem = (struct bfa_mem_kva_s *) km_qe;
                kva_elem->kva_curp = kva_elem->kva;
        }

        bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

        bfa_com_port_attach(bfa);
        bfa_com_ablk_attach(bfa);
        bfa_com_cee_attach(bfa);
        bfa_com_sfp_attach(bfa);
        bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
        bfa_com_diag_attach(bfa);
        bfa_com_phy_attach(bfa, cfg->drvcfg.min_cfg);
}

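/*
 * Usage sketch (illustrative, not part of this file): the driver-side
 * sequence around bfa_attach().  The allocation loop is elided; a real
 * caller walks meminfo.dma_info.qe and meminfo.kva_info.qe, allocates
 * each element's memory, and fills in its kva/dma base fields before
 * attaching.
 *
 *      struct bfa_iocfc_cfg_s cfg;
 *      struct bfa_meminfo_s meminfo;
 *
 *      bfa_cfg_get_default(&cfg);      (or bfa_cfg_get_min())
 *      bfa_cfg_get_meminfo(&cfg, &meminfo, bfa);
 *      ... allocate the DMA/KVA blocks listed in meminfo ...
 *      bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 *      bfa_init(bfa);  (later, to start hardware initialization)
 */
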
/*
 * Use this function to delete a BFA IOC. The IOC should be stopped (by
 * calling bfa_stop()) before this call.
 *
 * @param[in] bfa - pointer to the bfa structure.
 *
 * @return void
 */
void
bfa_detach(struct bfa_s *bfa)
{
        int     i;

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->detach(bfa);
        bfa_ioc_detach(&bfa->ioc);
}

/*
 * Move all queued completion callbacks from the BFA instance onto the
 * caller-supplied list for deferred processing.
 */
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
        INIT_LIST_HEAD(comp_q);
        list_splice_tail_init(&bfa->comp_q, comp_q);
}

/*
 * Run the dequeued completion callbacks. An entry flagged pre_rmv is
 * removed from the list before its callback runs (the element is no
 * longer valid once the callback returns) and completes with its saved
 * firmware status; all other entries complete with BFA_TRUE.
 */
void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
        struct list_head                *qe;
        struct list_head                *qen;
        struct bfa_cb_qe_s      *hcb_qe;
        bfa_cb_cbfn_status_t    cbfn;

        list_for_each_safe(qe, qen, comp_q) {
                hcb_qe = (struct bfa_cb_qe_s *) qe;
                if (hcb_qe->pre_rmv) {
                        /* qe is invalid after return, dequeue before cbfn() */
                        list_del(qe);
                        cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
                        cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
                } else
                        hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
        }
}

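/*
 * Usage sketch (illustrative): completions are typically dequeued under
 * the driver lock and the callbacks run after it is dropped.  The lock
 * and flags names below are assumed driver-side variables.
 *
 *      struct list_head doneq;
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&bfad->bfad_lock, flags);
 *      bfa_comp_deq(bfa, &doneq);
 *      spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 *
 *      bfa_comp_process(bfa, &doneq);
 */
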
/*
 * Discard queued completions, invoking each callback with BFA_FALSE.
 * Entries flagged pre_rmv are not expected on the queue here.
 */
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
        struct list_head                *qe;
        struct bfa_cb_qe_s      *hcb_qe;

        while (!list_empty(comp_q)) {
                bfa_q_deq(comp_q, &qe);
                hcb_qe = (struct bfa_cb_qe_s *) qe;
                WARN_ON(hcb_qe->pre_rmv);
                hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
        }
}

/*
 * Return the list of PCI vendor/device id pairs supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
        static struct bfa_pciid_s __pciids[] = {
                {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
                {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
                {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
                {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
        };

        *npciids = ARRAY_SIZE(__pciids);
        *pciids = __pciids;
}

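/*
 * Usage sketch (illustrative): enumerating the supported ids, e.g. when
 * building the PCI device table at module load.
 *
 *      struct bfa_pciid_s *pciids;
 *      int npciids, i;
 *
 *      bfa_get_pciids(&pciids, &npciids);
 *      for (i = 0; i < npciids; i++)
 *              ... register the vendor/device pair pciids[i] with the
 *                  PCI subsystem ...
 */
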
/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then overwrite any
 * entries that the user has configured.
 *
 * @param[in] cfg - pointer to struct bfa_iocfc_cfg_s
 *
 * @return void
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
        cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
        cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
        cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
        cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
        cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
        cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
        cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
        cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
        cfg->fwcfg.num_fwtio_reqs = 0;

        cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
        cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
        cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
        cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
        cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
        cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
        cfg->drvcfg.ioc_recover = BFA_FALSE;
        cfg->drvcfg.delay_comp = BFA_FALSE;
}

void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
        bfa_cfg_get_default(cfg);
        cfg->fwcfg.num_ioim_reqs   = BFA_IOIM_MIN;
        cfg->fwcfg.num_tskim_reqs  = BFA_TSKIM_MIN;
        cfg->fwcfg.num_fcxp_reqs   = BFA_FCXP_MIN;
        cfg->fwcfg.num_uf_bufs     = BFA_UF_MIN;
        cfg->fwcfg.num_rports      = BFA_RPORT_MIN;
        cfg->fwcfg.num_fwtio_reqs  = 0;

        cfg->drvcfg.num_sgpgs      = BFA_SGPG_MIN;
        cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
        cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
        cfg->drvcfg.min_cfg        = BFA_TRUE;
}
